From 96364bb8dd60165cca493f385dbbd288dc7a931f Mon Sep 17 00:00:00 2001
From: Christian Lessig
Date: Thu, 30 Jan 2025 18:33:46 +0100
Subject: [PATCH 01/24] Fix in pyproject dependencies (#5)

* Adding LICENSE file.
* Adding license header to all files.
* Adding .gitignore
* Fixed dependencies.
---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 07e3b03f1..40ea9e0c8 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -8,7 +8,7 @@ authors = [
 ]
 
 requires-python = ">=3.11,<3.13"
-dependencies_core = [ 'torch',
+dependencies = [ 'torch',
 'flash_attn',
 'numpy',
 'astropy_healpix',

From eeac517d5061ad706ef11d6e5025df36ef6d7eab Mon Sep 17 00:00:00 2001
From: Timothy Hunter
Date: Tue, 18 Nov 2025 17:37:47 +0100
Subject: [PATCH 02/24] [1265] Merge latest work to main (#1295)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Revert "Implement per-channel logging (#283)" (#434)
  This reverts commit 989ab6e1d6e8c0f69594414c7733adf30acd1c54.
* Fix FESOM datareader and int overflow (#417)
* Fix indexing in DataReaderFesom
* Enforce using only int64 in data loading
* ruff
* ruff2
* Review
* Change int64 back to int32
* changes (#462)
* Fix incorrect handling of empty window (which triggered problem in IO writing code). (#447)
* Update default_config.yml (#446)
  analysis_streams_output is missing, which leads to an error with val_initial=True and log_validation > 0.
* Re-enabled option to run plot_training as script and fixed -rf argument (#444)
* Re-enabled option to run plot_training as script and removed relative path as default from mutually-exclusive argument -rf.
* Ruffed code.
* Ruff check fix.
* Rename flags for parsing configuration and fixed default handling for standard config YAML-file.
* fix era5 config (#473)
  Adding z back in
* [251] Merge new IO class (#469)
* Implement mock IO (#336)
* Adapt score class (#339)
* Implement mock IO
* Adapt score class
* Removing unused file (#349)
* remove database folder (#355)
* Small change - CI - pinning the version of formatting (#361)
* changes
* changes
* Update INSTALL.md
* Update INSTALL.md
* Fixed Exxx lint issues (#284)
* Rebased to the latest changes and linted new changes
* addressed review comments
* addressed review comments
* Linted the latest changes.
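For context on the PATCH 01 fix above: under PEP 621, installers only read the standard `dependencies` key of the [project] table, so the non-standard `dependencies_core` key left the package without any declared dependencies. A minimal, illustrative check (not part of the patch; tomllib is stdlib from Python 3.11 on, matching requires-python):

    import tomllib  # stdlib for Python >= 3.11

    with open("pyproject.toml", "rb") as f:
        project = tomllib.load(f)["project"]

    # Before the fix this raised KeyError: installers saw no "dependencies".
    print(project["dependencies"])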
* corrected the formatting
* corrected the formatting
* configured ruff to use LF line endings in pyproject.toml
* [357] Sub-package for evaluation (#359)
* working
* changes
* removing deps from non-core project
* changes
* fixes
* comments
* Iluise quick fix stac (#374)
* remove database folder
* fix database
* Simplifying workflow for plot_training (#368)
* Simplifying workflow for plot_training
* Ruffed
* Working on implementing exclude_source
* Remove unused code
* Fixed ruff issue
* Fixing bug in lat handling (377) (#378)
* Fixing bug in lat handling
* Added comment
---------
Co-authored-by: Seb Hickman <56727418+shmh40@users.noreply.github.com>
* recover num_ranks from previous run to calculate epoch_base (#317)
* recover num_ranks from previous run to calculate epoch_base
* set email settings for commits
* addressing Tim's comment
* make ruff happy
* improve style
* changes (#385)
  Linter rule so np.ndarray is not used as type
* changed the script name from evaluate to inference as it simply gener… (#376)
* changed the script name from evaluate to inference as it simply generates inference samples
* changed evaluate to inference in the main scripts and corresponding calls in the config
* update the main function for the inference script
* changed evaluate to inference also in docstrings, unit test scripts, and integration test scripts
---------
Co-authored-by: Patnala,Ankit
* Introduce tuples instead of strings to avoid TypeError (#392)
* Exclude channels from src / target (#363)
* Exclude channels from src / target
* Simplified code and added comment that pattern matching is used
* Adding new stream config
* Fixing bug that led to error when accessing self.ds when dataset is empty
* Working on exclude_source
* work in progress
* Fixing incorrect formatting for logger (#388)
* Ruffed
* Refactored and cleaned up channel selection. Also added check that channels are not empty
* Cleaned channel parsing and selection
* Adjustments
* Removing asserts incompatible with empty dataset
---------
Co-authored-by: Christian Lessig
* add embed_dropout_rate to config v1 (#358)
* [402] adds checks to the pull request (#403)
* changes
* mistake
* mistake
* mistake
* changes
* doc
* Introduce masking class and incorporate in TokenizerMasking (#383)
* creating masking class and adapting tokenizer_masking to use this class
* minor changes to masking.py and tokenizer_masking
* removed old tokenizer_masking
* include masking_strategy in default_config
* change ValueError to assert
* linting formatting changes files
* further linting of docstrings
* create mask_source and mask_target in Masker, and update tokenizer_masking to use these, then style improvements
* linted masking, tokenizer_masking
* modify masker, rng and perm_sel now part of class, remove extra masking_rate, update comments, remove archived class
* remove check if all masked, not masked
* remove self.masking_rate from MultiStreamDS class, and masking args from batchify_source
* update tokenizer utils with description of idx_ord_lens in comment
* remove masking args from batchify_, perm_sel removed now internal to Masker class, remove handling special cases of masking (all masked)
* adding masking_strategy: to config
* remove unused mentions of masking_combination
* removed comment about streams
* changed assert to check self perm_sel is not None
* ruff masking, tokenizer_masking
* Ruffed
* Added warning to capture corner case, likely due to incorrect user settings.
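To make the Masker design from #383 above concrete: a minimal sketch, assuming a single permutation (perm_sel) held by the class so that source and target masks are complementary; class and method names follow the bullets, everything else is illustrative and not the repository's actual API:

    import numpy as np

    class Masker:
        """Illustrative sketch: rng and perm_sel live inside the class,
        as described in the #383 bullets; not the real implementation."""

        def __init__(self, masking_rate: float, seed: int = 0):
            assert 0.0 < masking_rate < 1.0
            self.masking_rate = masking_rate
            self.rng = np.random.default_rng(seed)
            self.perm_sel = None

        def mask_source(self, tokens: np.ndarray) -> np.ndarray:
            # Draw a fresh permutation; the source keeps the unmasked part.
            self.perm_sel = self.rng.permutation(len(tokens))
            n_masked = int(len(tokens) * self.masking_rate)
            return tokens[self.perm_sel[n_masked:]]

        def mask_target(self, tokens: np.ndarray) -> np.ndarray:
            # Reuse the stored permutation: target = complement of source.
            assert self.perm_sel is not None
            n_masked = int(len(tokens) * self.masking_rate)
            return tokens[self.perm_sel[:n_masked]]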
* Fixed incorrect call twice
* Fixed missing conditional for logger statement
* Required changes for better handling of rngs
* Improved handling of rngs
* Improved handling of rng
---------
Co-authored-by: Christian Lessig
* Implement per-channel logging (#283)
* Fix bug with seed being divided by 0 for worker ID=0
* Fix bug causing crash when secrets aren't in private config
* Implement logging losses per channel
* Fix issue with empty targets
* Rework loss logging
* ruff
* Remove computing max_channels
* Change variables names
* ruffed
* Remove redundant enumerations
* Use stages for logging
* Add type hints
* Apply the review
* ruff
* fix
* Fix type hints
* ruff
---------
Co-authored-by: Tim Hunter
* [346] Passing options through the slurm script (#400)
* changes
* fixes
* refactor `validation_io.write_validation` to make it more readable
* remove legacy code `validation_io.read_validation`
* encapsulate artifact path logic in config module
* remove redundant attribute `Trainer.path_run`
* use config to look up base_path in `write_validation`
* remove unused `write_validation` args: `base_path`, `rank`
* ensure correct type for paths
* remove streams initialization from `Trainer`
* remove path logic from `Trainer.save_model`
* simplify conditional
* rename mock io module
* update uv to include dask
* Implement io module to support reading/writing model output
* implement new validation_io routine
* use new write_validation routine
* remove unused code
* rename output routine to `write_output`
* ruffed and added comments
* fixed annotation
* use simple __init__ method for `OutputItem` instead of dataclasses magic
* address reviewers' comments
* rename method
* add simple docstrings
* ruffed
* typehint fixes
* refactor names
* update comments and typehints, don't import pytorch
* remove `__post_init__` methods, cache properties
* fixes and integration test
* final fixes :)
* changes
* changes
* changes
* changes
* changes
* more work
* changes
* changes
* changes
* ruffed
* ruffed
* improve logging and comments
* Update to score-class according to internal discussions and feedback in PR.
* Add license header.
* Ruffed code.
* Update to score-class according to internal discussions and feedback in PR.
* Add license header.
* Ruffed code.
* Add doc-string to call-method and provide example usage for efficient graph-construction.
* Some fixes to score-class.
* Some fixes to handling aggregation dimension.
* Add missing import of MockIO.
* changes
* changes
* removing the scores
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* changes
---------
Co-authored-by: Kacper Nowak
Co-authored-by: Christian Lessig
Co-authored-by: iluise <72020169+iluise@users.noreply.github.com>
Co-authored-by: Sindhu-Vasireddy <98752594+Sindhu-Vasireddy@users.noreply.github.com>
Co-authored-by: Seb Hickman <56727418+shmh40@users.noreply.github.com>
Co-authored-by: Julian Kuehnert
Co-authored-by: ankitpatnala
Co-authored-by: Patnala,Ankit
Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com>
Co-authored-by: Christian Lessig
Co-authored-by: Till Hauer
Co-authored-by: Simon Grasse
Co-authored-by: Michael
* [459] Attempt to fix ruff differences (#463)
* changes
* debug
* changes
* changes
* Update pyproject.toml (#457)
* Continue training through slurm script (#395)
* train_continue via slurm
* using __main__ as entry point for slurm script
* reverting config files to match base branch
* reverting config files to match base branch
* removing param_sum control logging before and after loading of model weights
* run ruff
* check whether from_run_id is in arguments
* trigger PR check
* remove block to set reuse_run_id=True
---------
Co-authored-by: Julian Kuehnert
* added the .python_version file set to python 3.12 (#482)
  Co-authored-by: Kerem Can Tezcan
* script (#489)
* Remove print statements for logging (#421) (#439)
* first change
* removed all prints
* changed model.py back
* adding comments and fixes
* added ruff fixes
* reverting files for PR
* ruff fixes
* removing run_id.py
* formatting changes
* changing comments in check_gh_issue script
---------
Co-authored-by: owens1
Co-authored-by: Timothy Hunter
* Rename batchsize to batchsize_per_gpu (#475)
* Rename batchsize to batchsize_per_gpu
* Fix ruff stuff
* fix (#490)
* add polar orbiters and abi-goes to the stac database (#426)
* testing adding metopa and metopb as placeholder drafts to stac database
* added the actual json files because I think we have to
* updated metopa metopb jsons and ets
* add fy3 and update metops
* updated names of metops
* updated metopb untarred size inodes and end date
* update names to instrument, satellite
* add untarred data size and inodes for metopa
* updated to oscar naming, with format platform, instrument, and added fengyun satellites
* update size and inodes of fy3c mwhs
* add fengyun jsons, missing before, and update unique ids of metopa and b
* add processing_level field to metopa as a test
* adding processing level field
* fix up processing level
* updated jsons and jsonnets for provenance
* actually include provenance
* updated to include processor and provider, remove provenance
* add abi-goes
* fix abi goes geometry
* fix latitude and longitude
* fix typo
* hopefully this time lat is right..
* update catalogue json for develop
* check catalogue on this branch
* jsonneted for develop
---------
Co-authored-by: iluise
* Added naming convention checks to lint (#501)
* Added naming convention checks to lint
* Implemented python naming conventions and corrected code accordingly
---------
Co-authored-by: Matthias Karlbauer
* Correct the in-code-names for rotation matrices (#516)
* Added naming convention checks to lint
* Implemented python naming conventions and corrected code accordingly
* Corrected renaming of rotation matrices from R to rot instead of to r
---------
Co-authored-by: Matthias Karlbauer
* extend format string and timedelta to days (#499)
* extend format string and timedelta to days
* replace with pd.to_timedelta
* import pandas
* ruff
* enforce "HH:MM:SS" format
* ruff
* Mlangguth/develop/issue 251 (#495)
* Add score-class to evaluate-package.
* Add score-class to evaluate-package.
* Linted and ruffed code.
* Add fix to io.py and update dependencies in common.
* Several small fixes to score-class and fast evaluation.
* Add utils for evaluate.
* Moved to_list to utils and improved doc-strings.
* Improve several doc-strings, avoid formatting of logger and other changes from PR review.
* Add xhistogram and xskillscore to dependencies of evaluate.
* Ruffed code.
* Linted code.
* Fix incorrect retrieval of validation batch size in validation IO.
* Final minor changes to argument-names
* changes (#471)
* Updated to camel case. (#445)
* Updated to camel case.
* Fixed formatting.
* Revert "Updated to camel case. (#445)" (#530)
  This reverts commit 4a8bd49067d86c8c9dd2930544d52cb9db8577af.
* [327] Script to create the links to output directories (results, ...) (#528)
* changes
* fixes
* slash
* slash
* checks
* checks
* Update config parameters lr and grad_clip (#545)
* updated lr and grad_clip in config
* modify lr to 1e-4
* Fixed randomization problem with masking (#510)
* Fixed randomization problem with masking (needs to be verified)
* Making sure the seed is ok
* Fixed problem with seed init.
* More improvements. But problem still seems to be there.
* Clean up of rng handling. Re-initialization is passed through to masker, which was the issue.
* - Fixed prime numbers
  - Cleaned up unnecessary rng init and added further comments.
---------
Co-authored-by: clessig
* Sophiex/dev/upper bound targets (#526)
* recovering my stash
* Fix bug
* Clean up pull request
* Clessig/develop/fix forecasting 448 (#449)
* Removed (second) residual connection for forecasting
* Added init to forecasting engine to small values
* Default values for forecasting experiments
* Updated settings
* Setting local engine to empty
* Fix z settings.
* Revised defaults with larger net
* Revised defaults with larger config
* Restoring default config
* Restoring
* Restoring default
---------
Co-authored-by: clessig
* Restore self.size_time_embedding in tokenizer_forecast.py (#548)
* Restore self.size_time_embedding in tokenizer_forecast.py
  Fixes #547
* Remove empty line for ruff
  Remove line for ruff?
* Replace cf.rank==0 with utils.distributed.is_root (#535)
  Co-authored-by: wang85
* Fixed handling of empty streams in plot_train (#552)
* Fixed handling of empty streams
* Fixed
---------
Co-authored-by: clessig
* Fix train_continue (#556)
* add DocStrings to model (#268)
* added DocStrings for class ModelParams
* added DocStrings for class Model
* Docstring cleanup v1
* Docstring cleanup v2
* Docstring cleanup v3
* Docstring corrections v1
* Docstring corrections v2
* Docstring corrections v3
* ruff check v1
* ruff check v2
* ruff check v3
---------
Co-authored-by: th3002s
* Revised structure in metric JSON-file (#549)
* Update score-class to support groupby-operations for per-sample evaluation.
* Update of fast evaluation pipeline to track metrics sample-wise and dump them into the newly structured JSON-files.
* Changes according to PR review and fix for handling situations with a single sample.
* Changes according to PR review and fix to filter channels for score-calculation.
* Fixed handling of empty source/target channels (#558)
  Co-authored-by: clessig
* Fix to peel_tar_channels to allow situations where no data for fstep=0 is present. (#572)
* Update era5.yml: token size 8 (#583)
* [DRAFT] CLI for scoring and plotting (#522)
* first interface
* working version
* save json
* add omegaconf
* address comment and clean up interface
* add config
* update scoring class
* Fix to allow for channel-selection in get_data and efficiency improvement to plot_data.
* Avoid circular dependency issues with to_list-function.
* Fix data selection issues.
* Enable proper handling of lists from omegaconf.
* update to mlangguth89 fork
* refactor forecast step
* ruffed
* add printing summary
* add ZarrData class
* adjust size of the plots
* attempt to solve sorting issue
* Rename model to run in config and in code.
* Fixes to Michael's review comments.
* Ruffed code.
* resync with mlangguth89 + add plot titles
* revert mixed
---------
Co-authored-by: Michael
* 'Handle list input to forecast_steps (Closes #573)' (#581)
* 'fixed bug not handling list input to forecast step #573'
* linted
* replace error with assert
* lint
* roll-back accidental lint
---------
Co-authored-by: Christian Lessig
* remove plot config (#597)
* first interface
* working version
* save json
* add omegaconf
* address comment and clean up interface
* add config
* update scoring class
* Fix to allow for channel-selection in get_data and efficiency improvement to plot_data.
* Avoid circular dependency issues with to_list-function.
* Fix data selection issues.
* Enable proper handling of lists from omegaconf.
* update to mlangguth89 fork
* refactor forecast step
* ruffed
* add printing summary
* add ZarrData class
* adjust size of the plots
* attempt to solve sorting issue
* Rename model to run in config and in code.
* Fixes to Michael's review comments.
* Ruffed code.
* resync with mlangguth89 + add plot titles
* revert mixed
* remove plot config + style addition to evaluation package
* ruffed
---------
Co-authored-by: Michael
* integrate IFS scores from Quaver into FastEvaluation (#600)
* first interface
* working version
* save json
* add omegaconf
* address comment and clean up interface
* add config
* update scoring class
* Fix to allow for channel-selection in get_data and efficiency improvement to plot_data.
* Avoid circular dependency issues with to_list-function.
* Fix data selection issues.
* Enable proper handling of lists from omegaconf.
* update to mlangguth89 fork
* refactor forecast step
* ruffed
* add printing summary
* add ZarrData class
* adjust size of the plots
* attempt to solve sorting issue
* Rename model to run in config and in code.
* Fixes to Michael's review comments.
* Ruffed code.
* resync with mlangguth89 + add plot titles
* revert mixed
* remove plot config + style addition to evaluation package
* ruffed
* add option to comment out plotting
* resync utils to develop
---------
Co-authored-by: Michael
* [569] Load eagerly the stream content in order (#585)
* changes
* change
* changes
* Remove loading of streams also from inference.
---------
Co-authored-by: Christian Lessig
* [DRAFT][590] Rename metrics file (#601)
* Implemented backward-compatible function to read and write `{RUN-ID}_train_metrics.json` (new) or `metrics.json` (old)
* Quick fix for #553 NaT from encode_times_target, move offset to before trigs (#589)
* quick fix for 553 NaT from encode_times_target, move offset
* change offset to 10 minutes...
* ruffed
* apply hotfix to deltas_sec
* ruffed
* fix: associate output stream names with correct index (#519)
* fix: associate output stream names with correct index
* ruffed
* fix: iteration over output items
* address comments
* fix: correctly index channels
* fix stream indexing logic, add asserts
* fix: extraction of data/coordinates for sources
* fix assert
* Clessig/develop/channel logging 282 (#615)
* Fix bug with seed being divided by 0 for worker ID=0
* Fix bug causing crash when secrets aren't in private config
* Implement logging losses per channel
* Fix issue with empty targets
* Rework loss logging
* ruff
* Remove computing max_channels
* Change variables names
* ruffed
* Remove redundant enumerations
* Use stages for logging
* Add type hints
* Apply the review
* ruff
* fix
* Fix type hints
* ruff
* Implement sending tensors of different shapes
* ruff
* Fix merge
* Fix docstring
* rerun workflow
* Review
* Change default columns name
* Fix merge
* - Added ddp_average_nan that is robust to NaN/0 entries when computing mean
  - Switched from all_gather to this function in trainer to robustly average
  - Some code cleanup
* use all_to_all communication
* Fixing problem with single-worker (non-DDP) training
* Ruffed
* Re-enabled validation loss output in terminal
* Simplified handling of dist initialized
---------
Co-authored-by: Kacper Nowak
Co-authored-by: Tim Hunter
Co-authored-by: clessig
* Fix bug in corner case of data reading (#621)
* Changed logging level for some messages.
* Fix bug in data reading and add assert to better detect these problems.
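The ddp_average_nan mentioned in the #615 bullets above can be pictured as follows: a sketch assuming an initialized torch.distributed process group, illustrative only and not the repository's actual implementation:

    import torch
    import torch.distributed as dist

    def ddp_average_nan(values: torch.Tensor) -> torch.Tensor:
        """Sketch of a NaN/0-robust distributed mean: sum finite entries and
        their count across ranks, then divide, guarding the zero-count case."""
        finite = torch.isfinite(values)
        local_sum = torch.where(finite, values, torch.zeros_like(values)).sum()
        local_cnt = finite.sum().to(values.dtype)
        stats = torch.stack([local_sum, local_cnt])
        if dist.is_available() and dist.is_initialized():
            dist.all_reduce(stats, op=dist.ReduceOp.SUM)  # sum over all ranks
        total, count = stats[0], stats[1]
        # Avoid dividing by 0 when every entry on every rank was NaN.
        return total / count if count > 0 else torch.tensor(float("nan"))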
* Loss class refactoring (#533)
* Fix bug with seed being divided by 0 for worker ID=0
* Fix bug causing crash when secrets aren't in private config
* Implement logging losses per channel
* Fix issue with empty targets
* Rework loss logging
* ruff
* Remove computing max_channels
* Change variables names
* ruffed
* Remove redundant enumerations
* Use stages for logging
* Add type hints
* Apply the review
* ruff
* fix
* Fix type hints
* ruff
* Implement sending tensors of different shapes
* ruff
* Fix merge
* Fix docstring
* rerun workflow
* creating loss class
* Adapted varnames in new compute_loss function to match LossModule
* comments and loss_fcts refactoring
* Suggested a separation of mask creation and loss computation
* first working version of LossModule; added unit test
* Modifications and TODOs after meeting with Christian and Julian
* Added Christian's comments and updated code partially
* Julian & Matze further advances to understand shapes
* New mask_t computations. Not yet correct, thus commented
* Resolved reshaping of tensors for loss computation
* small changes in _prepare_logging
* J&M first refactoring version finished, 2 tests ok
* First round of resolving PR comments
* add ModelLoss dataclass, rearrange mask and loss computation
* Integrating new LossCalculator into trainer.py and adding docstrings
* J&M resolved temp.item() error
* Second round of PR comments integrated
* - Fixed loss accumulation
  - Cleaned up variable names
* Renamed weight
* Removed unused vars
* Inspected loss normalization for logging
* Minor clean-up
* Removing unused code.
* More refactoring: breaking code down in smaller pieces
* Fix
* Adding missing copyright
* Adding missing copyright
* Fixing incorrect indent
* Fix
---------
Co-authored-by: Kacper Nowak
Co-authored-by: Tim Hunter
Co-authored-by: Julian Kuehnert
Co-authored-by: Matthias Karlbauer
Co-authored-by: Christian Lessig
Co-authored-by: clessig
* Update momentum (#633)
* Update momentum
* Remove final GELU in MLP
* Adding assert to catch inconsistent config params (#630)
* Update default_config.yml (#641)
  Fix incorrect stream
* Backward compatibility of 'loss_avg_mean' metric name (#637)
  Co-authored-by: Christian Lessig
* Iluise/develop/plotting issues (#635)
* fix plotted timestamp
* fix crashing when a run is plot only
* ruffed
* implement comments
* Mlangguth/develop/issue 586 (#625)
* Add options to configure the marker size, the marker type and enable marker-scaling with latitude for map-plots
* Update doc-strings to follow standard format.
* Ruffed code.
* Changes due to review comments.
* Less verbose logging and improved handling of setting to plot histograms.
* Corrected error-message in plot_data.
* [DRAFT]: Prediction head architecture clean-up (#481)
* - Avoid time encoding is 0
  - eps in layer norms to 10^-3
  - bf16
* Make the attention dtype and norm eps configurable
* Fix gitignore and add config files
* Shuffle config files into sensible folders
* Implement first attempt at new prediction heads
* Fix some bugs
* Fix trainer compile + fsdp
* Fix trainer and better defaults
* Choose AdaLN
* Correlate predictions per cell
  Previously this PR treated them as independent
* Make things more parameter efficient
* Revert "Make things more parameter efficient"
  It made things way worse
  This reverts commit 0f31bf11c82ee9f951810ac6782a4b31b83b8757.
* Improve the prediction heads at small sizes
* Improve the stability of training
  Two main changes: better beta 1 and beta 2 values in AdamW and remove gelu
* Adding some more regularisation
  In particular to prevent training divergences and overfitting
* Forgot the dropout in MLPs
* Tune the learning rate
* Add the original prediction heads
  CAREFUL: Untested!!!
* Fix bugs and ruff
* Restore old version last part
* Start fixing the defaults
* Deleting hpc specific configs
* Deleting hpc specific configs
* Defaults and documentation
* Apply ruff
* Clean up code
* Add one more comment
---------
Co-authored-by: Christian Lessig
* Fix bug in logging buffer reset (#651)
  Co-authored-by: clessig
* use config dropout_rate in EmbeddingEngine (#646)
* Make numpy argsort version resilient (#645)
* Fix backward compatibility (#655)
* Implement global and per-cell channel masking (#496)
* creating masking class and adapting tokenizer_masking to use this class
* minor changes to masking.py and tokenizer_masking
* removed old tokenizer_masking
* include masking_strategy in default_config
* change ValueError to assert
* linting formatting changes files
* further linting of docstrings
* create mask_source and mask_target in Masker, and update tokenizer_masking to use these, then style improvements
* linted masking, tokenizer_masking
* modify masker, rng and perm_sel now part of class, remove extra masking_rate, update comments, remove archived class
* remove check if all masked, not masked
* remove self.masking_rate from MultiStreamDS class, and masking args from batchify_source
* update tokenizer utils with description of idx_ord_lens in comment
* remove masking args from batchify_, perm_sel removed now internal to Masker class, remove handling special cases of masking (all masked)
* working implementation of healpix level masking in Masker, with too many prints and hardcoded hl_mask and hl_data
* adding masking_strategy: to config
* remove unused mentions of masking_combination
* removed comment about streams
* changed assert to check self perm_sel is not None
* ruff masking, tokenizer_masking
* implementation of healpix masking code with lots of printing
* removed print statements from masking.py
* minor line change
* remove default for strategy_kwargs
* add strategy_kwargs to config, and pass through masker to pass masking strategy specific args
* vectorise child indices calcs, implement masking_rate_sampling, minorly updated docs
* remove print statements
* cf.strategy_kwargs passed to Masker in multi_stream_data_sampler
* masking_strategy random and strategy kwargs passed to config
* ruffed
* pass cf.get(strategy_kwargs or {}) to the Masker and update masking to reflect this
* update config so it does not include strategy_kwargs, no longer needed
* move asserts for healpix to constructor, rename to masking_strategy_config, update config with example of healpix
* test working version, understanding what is happening
* revert breaking develop merge and conflict in config
* default config put channel masking
* reverting the accidental revert...
* small change to config
* implemented global and per-cell per channel masking in masking, change to config
* remove print statements from multistream
* updated config for compatibility to run immediately
* cleaned code, assert to fail for different number of source and target streams
* updated default config to match latest
* fixed _generate_channel_mask to handle empty cells of data
* fixed docstring of masker
* ruffed linted
* rename l in token_lens
* lint ruff, remove prints
* add assert for source and target channels must be the same
* fix config to develop, new assert, remove assert
* revert assert statement for readability
* clip the values in masking_rate_sampling to 0.01 and 0.99
* revert cell name to tl
* remove empty lines from model
* remove empty line from embeddings
* remove empty line tokenizer_masking
* ruff masking, tokenizer_masking
* update config again to develop version
* update config comment for masking strategies
* update channel masking to handle non-data channels for new loss
* ruffed
* Implemented check that for channel masking source and target channel have to be identical
* Minor code improvements
* Fixed incorrect return type for special case
* Ruffed and reduced magic constants
* Minor fixes to _generate_healpix_mask
* Cleaned up and optimized mask generation for channel masking
* changed to use mode global or per_cell, improved docstring for masking strategies
* added documented valid examples for masking_strategy_config to default_config
* ruffed
* update example masking_strategy_config in default
* Minor adjustments to default settings
* remove mention of hl_data in masking_strat_config
---------
Co-authored-by: clessig
* Removed that checkpoint is saved at the first batch (#663)
* Clessig/develop/fix data reading anemoi missing date 671 (#672)
* Changed logging level for some messages.
* Fixed unhandled exception with missing dates.
* Fixed debug message
* Make compare_run_config.py usable again (#661)
* Update compare_run_config.py to use existing functions from current repo.
* Ruffed code.
* [595] Changes for running a notebook script (#598)
* Changes
* Changes
* work
* change
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* reverse old changes
* linter
* Implement regional evaluation (#652)
* Add RegionBoundingBox data class to score-utils to handle evaluation for different regions.
* Implement region-specific evaluation in plot_inference.py.
* Adapted utils.
* Introduction of clean RegionLibrary in score_utils.py.
* Ruffed code.
* Updates following reviewer comments.
* Ruffed code.
* Clessig/develop/fix loss 678 (#679)
* Changed logging level for some messages.
* Fixing bug with incorrect counting
* using config results path instead of fixed path (#631)
* using config results path instead of fixed path
* ruff
* Add forgotten LayerNorm (#687)
* Add forgotten LayerNorm
* Apply ruff
---------
Co-authored-by: Sophie Xhonneux
* Fix performance degradation in loss computation (#690)
* Changed logging level for some messages.
* Refactored loss computation to improve performance.
* Working around ruff issue
* - Refactored code to improve structure and readability
  - Fixed problem with incomplete normalization over loss functions
  - Solved problem with mse_weighted as loss function when mse is specified
* Fixed problems with multi-worker training
* Fixed indentation bug and bug in assert
* [DRAFT] Rename plot_inference.py and entrypoint for evaluation (#683)
* Rename plot_inference.py.
* Rename of main-method and move parsing of arguments for entrypoint.
* Introduce entrypoints to fast evaluation.
* Fix to call of main in run_evaluation.py.
* Rename entrypoint and add dependency to weathergen-evaluate.
* Add missing comma in pyproject.toml.
* Option for non-linear output layer in prediction head (#673)
* Add score-class to evaluate-package.
* Add score-class to evaluate-package.
* Linted and ruffed code.
* Add fix to io.py and update dependencies in common.
* Several small fixes to score-class and fast evaluation.
* Add utils for evaluate.
* Moved to_list to utils and improved doc-strings.
* Improve several doc-strings, avoid formatting of logger and other changes from PR review.
* Add xhistogram and xskillscore to dependencies of evaluate.
* Ruffed code.
* Linted code.
* Add helper function to get custom last activation.
* Add option to control stream-specific non-linear output layer.
* Controlling print-statement to model.py.
* Corrected handling of config for prediction head.
* Add support for stream-specific, optional non-linear output activation function.
* Provision of ActivationFactory.
* Ruffed.
* Changes following review comments.
* Fix in parsing final_activation-argument.
* Clessig/develop/fix empty 647 (#675)
* Changed logging level for some messages.
* Removed checks that require non-empty channels
* Adding warning
* Fixed convergence of training (#696)
* Restored old prediction head functionality. Other adjustments/reverts, in particular in attention.
* Ruff'ed
* Addressed reviewer comments and cleaned up minor details
* Fixed bug in obs data reading (#698)
* Restored old prediction head functionality. Other adjustments/reverts, in particular in attention.
* Ruff'ed
* Fixed bug in obs data reading where data violated the window
* Fix
* Update data_reader_obs.py
* Restoring to develop
* Fix
* Ruffed
* Clessig/develop/fix logging verbosity 564 (#619)
* Changed logging level for some messages.
* Added support for more fine grained output control.
* Changed logging setting for inference.
* Minor improvement to doc string
* include run_id in debug log file
* ruff
---------
Co-authored-by: Julian Kuehnert
* Refactor path-setting for 'model' and 'results' to be dynamic (no relative paths) (Closes #591) (#677)
* temp commit wip
* change model_path and run_path setting to dynamic (independent of HPC) (untested)
* removed unnecessary set_paths references
* linted
* remove commented code
* removed commented lines
* Enable plot_train with dynamic paths
* lint
---------
Co-authored-by: Matthias Karlbauer
* Fix (#715)
* modified evaluation api, callable as python function (#713)
* Fixed bug for degenerate streams (#723)
  NaN-robust min/max computation.
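The NaN-robust min/max from #723 just above, as an illustrative sketch (the fallback range for a fully degenerate stream is an assumption, not the repository's choice):

    import numpy as np

    def robust_min_max(data: np.ndarray) -> tuple[float, float]:
        """Sketch of a NaN-robust value range, e.g. for colorbar limits:
        a degenerate (all non-finite) stream yields a harmless default
        instead of propagating NaN into the plotting code."""
        if not np.isfinite(data).any():
            return 0.0, 1.0  # assumed fallback for degenerate streams
        return float(np.nanmin(data)), float(np.nanmax(data))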
* Fixed (#725)
  Resolves config loading error when passing a `model_dir`
* Fix on loading model config (#726)
* Small fix on loading model config
* minor change
* Detect if channels for plotting differ from JSON and recompute if necessary (Closes #701) (#718)
* new branch
* detecting changes in channel spec
* style changes
* style changes
* Delete config/plot_config.yml
* incorporated PR feedback
* added run_evaluation (again)
* Clessig/develop/fix logging 719 (#720)
* Cleaned up to use proper logger
* Cleaned up to use proper logger
* Fix logging: needs to be registered per output stream and not per logging level
* Set logging level consistently with debug to file
* Fixes
* Added FSDP-sharding after loading model for train continue (#729)
* Added FSDP-sharding after loading model for train continue
* Improved consistency
* Fixed resetting FSDP after checkpoint saving
* Update handling of `run_path` and `model_path` (Closes #716) (#732)
* proposed solution, untested
* assert instead of error
* lint
* incorporating PR feedback
* lint
* added explicit argument passing
* lint
* Make cartopy map resources a shared asset to prevent downloading from… (#731)
* Make cartopy map resources a shared asset to prevent downloading from the internet which is not always possible
* Replaced print by logger statement
---------
Co-authored-by: xhonneux2
Co-authored-by: karlbauer1
* Clessig/develop/fixes hackathon (#736)
* Fixed some comments that generated warnings
* Added to create path for log files if it doesn't exist
---------
Co-authored-by: Christian Lessig
* Revised path defaults and output directory structure for fast evaluation (#681)
* First changes to path-handling.
* Consistent path for maps and histograms.
* Update of evaluation scripts for proper path defaults and directory structures.
* Make root-path to repo available via common-package.
* Introduce proper defaults to plot_inference.py and set-up desired directory structure for evaluation output.
* Rename of results_dir-parameter to results_base_dir
* Ruffed code.
* Allow for run-specific results-paths and use config to get defaults.
* Several fixes and consistency improvements.
* Remove manual default usage in plotter.py
* Ruffed code.
* Update __init__.py
  Remove _REPO_ROOT.
---------
Co-authored-by: Christian Lessig
* Mk/develop/fix plot train 727 (#738)
* Load model_path from private config if not provided
* Use existing function to get private model path
* Incorporated PR comments
* Fix problems with rel paths in logging files (#742)
* Fixed relative path handling for logging files.
* Adding default argument to _load_private_conf()
* Implement first function for latitude weighting (#705)
* Changed logging level for some messages.
* Refactored loss computation to improve performance.
* Working around ruff issue
* - Refactored code to improve structure and readability
  - Fixed problem with incomplete normalization over loss functions
  - Solved problem with mse_weighted as loss function when mse is specified
* Fixed problems with multi-worker training
* add location weights, first commit
* assertion on mask and len(location_weights)
* restructuring of location weights and fixes in mse_channel_location_weighted function
* fix coords_raw dependency on offset and fstep
* ruff
* addressing review comments and fixing bug
* rm location_weight from default stream config
---------
Co-authored-by: Christian Lessig
Co-authored-by: Julian Kuehnert
* Fix failure for notebooks.
  (#750)
* add proper error message for source_include not equal to target_include (#767)
* Implemented fractional target selection (#751)
* implemented fractional target selection
* ruffed
* fix up configs and <= to accept target_fraction 0.0
* revert to simple implementation of per stream sampling_rate_target
* restore configs
* Corrected formula for L2-error in score-class. (#721)
* Corrected formula for L2-error in score-class.
* Introduced option to get the original or the squared L2-norm.
* Added doc-string for L2-norm.
* Fix sampling rate (#773)
  Co-authored-by: clessig
* Update default_config.yml (#776)
* Adding the animations feature, fixing stable colorbars (not per stream) (#692)
* Adding the animations feature
* keep only the animations and max-min functions.
* Sophiex/dev/name modules (#754)
* Add names to modules as prep for freezing
* Add functionality to freeze modules based on added names
* Ruff
* Clean up
* Wrong import path
* Ruff
* Fix animations bug with paths (#781)
* Fix another bug in animations (#783)
* Work around to allow for model freezing (#785)
* Work around to allow for model freezing.
* Ruff
* fix to avoid whole model element of named_modules and hence freeze whole model
---------
Co-authored-by: clessig
Co-authored-by: Sebastian Hickman
Co-authored-by: Seb Hickman <56727418+shmh40@users.noreply.github.com>
* Fast evaluation for integration tests (#770)
* rename module level constant
* split inference into own method
* use proper fast evaluation pipeline for `evaluate_results`
* ruffed
* remove assert => different bug
* adjust tests for new plot template
* Update checking the value of plot_histograms and plot_animations (#788)
* pass StreamData instances to io.py (#779)
* Rename anemoi directories and built backward compatibility (Closes #709) (#771)
* renamed anemoi dirs and built backward compatibility
* ruff
* removed stream directories and updated logging
* renamed all streams
* ruff
* seviri file name change
* cerra_seviri folder update
* cerra path update
---------
Co-authored-by: Christian Lessig
* Fix IO when targets/preds are empty. (#760)
* Modify DataReaderObs to get base_yyyy... from stream config (#794)
* modify DataReaderObs to get base_yyyy... from stream config, and set it in the ctor, with default of 19700101. Use it in _setup_sample_index. Remove loading obs_id attr. Add igra.yml with example usage.
* add license to igra config
* update to ISO base_datetime, parse to read idx from zarr
* fix integration tests (#796)
* Fixed bug for empty source (#800)
  Co-authored-by: clessig
* Train continue function with arguments (#803)
* add train_continue_from_args to call with arguments
---------
Co-authored-by: Julian Kuehnert
* remove module common/mock_io (#809)
* Update data_reader_obs.py removing asserts (#817)
* Sgrasse/develop/issue 616 (#648)
* encapsulate extraction of source data
* bundle offsetting of key attributes
* consolidate calculation of datapoints indices into method
* encapsulate extraction of coordinate axis in function.
* replace attribute `channels` by `target_channels` and `source_channels`
* ruffed
* ruffed
* fixes
* address Micha's comments
* reactivate assert
* fix typo / renaming
* small fix
* uncomment source_n_empty and target_n_empty unused variables
* fix unit tests (#814)
* Plot substeps (#789)
* Create subplots with grouping by valid_time.
* Create histograms at substeps with grouping by valid_time.
* Make use of inference run config to distinguish between situations where all datapoints of a sample should be plotted or where sub-stepping is required.
* Add helper function to get values of keys from stream configs.
* Corrected loading of model config.
* Ruffed code and turning message-level on cartopy path to debug.
* Revisions following reviewer comments.
* fix histograms
* ruffed
---------
Co-authored-by: Ilaria Luise
Co-authored-by: ilaria luise
* Add the possibility of common ranges in plots per variable and stream (#801)
* Create subplots with grouping by valid_time.
* Create histograms at substeps with grouping by valid_time.
* Make use of inference run config to distinguish between situations where all datapoints of a sample should be plotted or where sub-stepping is required.
* Add helper function to get values of keys from stream configs.
* Corrected loading of model config.
* Ruffed code and turning message-level on cartopy path to debug.
* Add the possibility of common ranges in plots per variable and stream
* Revisions following reviewer comments.
* fix histograms
* ruffed
* update utils
---------
Co-authored-by: Michael
Co-authored-by: Ilaria Luise
Co-authored-by: ilaria luise
* Fix to io problems. (#820)
* Enable histograms for data with some NaNs (#823)
* Fix to filter NaNs before histogram creation.
* Removed unused code lines and correct for bug in marker scaling in plotter.py.
* Clessig/develop/fix empty io 819 2 (#822)
* Fix to io problems.
* Fix issues in input
* Iluise/fix empty io 819 plotting (#826)
* Fix to io problems.
* Fix issues in input
* fix plotting
* ruffed
---------
Co-authored-by: Christian Lessig
* fix plotting for partially filled first forecast steps (#828)
  Co-authored-by: luise1
* Fix calculation of scores per fstep (#853)
* fix calculation of scores per fstep
* simplified syntax
---------
Co-authored-by: Julian Kuehnert
Co-authored-by: ilaria luise
* Fix Issue 835 (#841)
  Enable freezing the target coord embedding when it is just a simple layer
* Improve r3tos2 (#744)
* vectorized r3tos2
* revise comment
---------
Co-authored-by: Javad Kasravi
Co-authored-by: Christian Lessig
* Sophiex/dev/latent noise (#594)
* - Avoid time encoding is 0
  - eps in layer norms to 10^-3
  - bf16
* Make the attention dtype and norm eps configurable
* Fix gitignore and add config files
* Shuffle config files into sensible folders
* Implement first attempt at new prediction heads
* Fix some bugs
* Fix trainer compile + fsdp
* Fix trainer and better defaults
* Choose AdaLN
* Correlate predictions per cell
  Previously this PR treated them as independent
* Make things more parameter efficient
* Revert "Make things more parameter efficient"
  It made things way worse
  This reverts commit 0f31bf11c82ee9f951810ac6782a4b31b83b8757.
* Improve the prediction heads at small sizes
* Improve the stability of training
  Two main changes: better beta 1 and beta 2 values in AdamW and remove gelu
* Adding some more regularisation
  In particular to prevent training divergences and overfitting
* Create classes for latent noise
* Add the latent noise after the local engine
* Add the KL loss
* Formatting
* Clean up
* Use the same for loop as before
* Prepare branch for merge
* Remove superfluous configs
* Restore default configs
* Mistake in the merge fixed
* Final beauty changes
* Final clean up
* Ruff
---------
Co-authored-by: Christian Lessig
* [Hotfix] Fix crash when using list of forecasting steps (#824)
* Fix crash when using list of forecasting steps
* Ruff
* Grammar fix
* Fix grammar
* Add checking forecast steps list
* Review
* Allow 0 as forecast step
* Add list length check
* Assert non-negative forecast step integer, added assertion messages
* Ruff
* ruff
* Move check to config
* what the ruff
---------
Co-authored-by: Matthias Karlbauer
* add tokenizer base class (#815)
* add tokenizer base class
* ruffed
* ruffed v2
* move calculation of centroids to base_class
* move size_time_embedding initialization
* remove ABC from tokenizer base_class
* renaming
* ruffed
* ruffed v2
* add return value to compute_source_centroids
---------
Co-authored-by: Seb Hickman <56727418+shmh40@users.noreply.github.com>
* vectorize s2tor3 (#745)
* vectorize s2tor3
* ruff code
---------
Co-authored-by: Javad Kasravi
* Remove cleaning stream name when logging loss (#763)
* Combine masking strategies during training, with appropriate masking_… (#756)
* combine masking strategies during training, with appropriate masking_strategy_config
* restore config samples per validation
* restore configs, and add to masking_strategy_config
* clarify pass to per batch per stream
* updated combination masking to support same masking strategy for all streams in the batch. Strategy resampled for every batch.
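The s2tor3/r3tos2 maps vectorized in #744/#745 above convert between points on the sphere and unit vectors in R^3; a sketch assuming the common latitude/longitude-in-radians convention (the repository's argument conventions may differ):

    import torch

    def s2tor3(lat: torch.Tensor, lon: torch.Tensor) -> torch.Tensor:
        """Sketch of a vectorized S^2 -> R^3 map: no Python loops,
        works on arbitrarily batched coordinate tensors."""
        clat = torch.cos(lat)
        return torch.stack(
            (clat * torch.cos(lon), clat * torch.sin(lon), torch.sin(lat)),
            dim=-1,
        )

    def r3tos2(xyz: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
        """Inverse map R^3 -> S^2; clamp guards against rounding outside [-1, 1]."""
        x, y, z = xyz.unbind(dim=-1)
        lat = torch.asin(torch.clamp(z, -1.0, 1.0))
        lon = torch.atan2(y, x)
        return lat, lon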
* rename so we have masking_strategy and masking_strategy_per_batch
* ruffed
* clean, default to different strategy per batch for combination
* ruff
* remove unused variable
* updated docstrings (#875)
  Co-authored-by: Patnala,Ankit
* Enable correct reading of channels, forecast_step, sample variables in plot config file (Closes #717) (#755)
* adjusted run_evaluation and utils code to take into account forecast_step variable from config
  (cherry picked from commit 26c26a923cabc5777bc75ef911f0fc3c61397e1a)
* print statement change
* catching error when fstep not present in zarr file
* upgrades based on PR feedback
* intermediate commit
* intermediate commit
* new functions _get_channels_fsteps_samples and check_metric
* edited plotting
* inter commit
* fixed bug in get_data
* self review
* refactor
* dummy commit
* inter commit
* feedback applied
* incorporate review feedback
* removed sorting of fsteps_final
* remove comments
---------
Co-authored-by: ilaria luise
* Implement causal masking as MTM strategy (#798)
* first rough implementation of causal masking
* incorporated combine masking strategies
* include per stream sampling rate target in tokenizer_masking based on other PR
* clean up implementation of causal masking
* remove TODO
* remove old causal masking function
* add latest error message for channel
* change if to elif for causal masking
* if to elif in mask_target
* cleaned up causal masking code
* tokenizer_masking small change
* updated config
* fix up config
* restore era5 config
* ruffed
* update config and masking.py with causal masking specific masking rate, and some comments
* ruffed
* roll back causal_masking_rate changes, return to just use masking_rate
* faster version of causal masking, vectorise where possible. Need list comprehension for variable length tokens
* ruffed
* add log scale and refactor plot_summary (#865)
* add log scale and refactor plot_summary
* add plot_utils
* add grid
* ruffed
* fix marker size
* fix global plotting options
* add types
* ruffed
* Fixed stream name factoring (#534)
* Updated to camel case.
* Fixed formatting.
* to reflect upstream develop
* got rid of regex and changed formatting of str names
* pulled recent changes from upstream develop
* Removed refactoring of lf_name.
* clean_name with the new changes
* Fetched latest changes to the branch
* Fixed linting
* Fixed stream name without touching the losses dict
* fixed type annotation
* add srun to integration-test in actions script (#886)
* add srun to integration-test in actions script
* add --offline flag to integration-test in actions.sh
* Merge compare_run_configs.py with markdown table version (#699)
* initial comments to outline implementation
* Refactor config comparison script to support YAML input and enhance output formatting
* remove unused code
* Add example configuration for model run IDs and display patterns
* shorten
* Add 'tabulate' dependency to enhance table formatting capabilities
* add instructions to config
* restore option for command line run ids and model dirs
* ruff
* fix arg parsing
* add option to show specific or all parameters in config comparison
* ruff
* Remove 'tabulate' from dependencies
  Removed 'tabulate' dependency from project requirements.
* logging, imports and dependency in compare_run_configs.py
* fix logging and dependencies
* ruff
* set default
* fix arg order and checks
* improve model directory handling and add exception when there is no latest model
* refactor error handling in main function to omit exception details in logs
* ruff
* ruff
* add weathergen dependency
* make file executable
* add private home argument
* revert to default model path argument assuming symlink to shared folder is set
* ruff
* Implement splitting zarr and regex filenames (#524)
* Implement splitting zarr and regex filenames
* Optimize dask reading operations
* Ruff
* Review
* Ruff
* Remove stream name cleaning when logging loss
* Add tolerance to setting std to 1.0
* Implement input column reordering and channel exclusion
* Update stream config
* Ruff
* Add config file
* Implement variable state persistence
* ruffffff
* ruffa
* ruf ruf
* Add select channels method
* updating .gitignore file to include all development directories without / (#900)
* [804] vectorize tokenize_window_space (with test) (#893)
* vectorize tokenize_window_space
* import pad_sequence from torch
* change variable names, remove device, add comments, ruff code
* changes
* changes
* changes
* simplify
* small change
* unit tests
* unit tests
* unit tests
---------
Co-authored-by: Javad Kasravi
* [812] efficient tcs computation (with tests) (#894)
* efficient tcs computation
* revise vectorized tcs_optimized, ruff code
* add typing
* changes
* changes
* merge
---------
Co-authored-by: Javad Kasravi
* [811] Improve perf locs to cell coords ctrs (#895)
* optimize locs_to_cell_coords_ctrs
* revise get_target_coords_local_ffast for new optimized locs_to_cell_coords_ctrs
* changes
* changes
* tests
---------
Co-authored-by: Javad Kasravi
Co-authored-by: Sophie X <24638638+sophie-xhonneux@users.noreply.github.com>
* [810] optimize locs_to_ctr_coords (with tests) (#896)
* optimize locs_to_ctr_coords
* changes
* changes
* changes
* changes
* merge
* changes
---------
Co-authored-by: Javad Kasravi
* Migrated Config to common (#607)
* Updated to camel case.
* Fixed formatting.
* to reflect upstream develop
* got rid of regex and changed formatting of str names
* pulled recent changes from upstream develop
* migrated config to common
* fixed lint issues
* Corrected all the changes
* syntax err fixed
* Fixed import
* Latest upstream changes pulled
* Fixed Linting errors
* Lint fix
* Pulled latest
* fixed other occurrences
* Fix compare_config after config went to common (#903)
* fix after config went to common
* Change argument type for --show option from int to str in main function
* Update default config path in main function to compare_config_list.yml
* Iluise/develop/add io reader (#891)
* first implementation of reader class for evaluation package
* add io reader
* move check_availability to reader
* update to develop
* fix retrieve results
* address comments
* Fix minor bug in modules (#909)
---------
Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com>
* [908] Harmonize the linter check between the CI and our CLI (#910)
* changes
* changes
* [554] Updates the PR template (#912)
* changes
* changes
* changes
* comments
* [906] Bug fix in tokenizer (#907)
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* changes
* cleanups
* changes
* comments
---------
Co-authored-by: Seb Hickman <56727418+shmh40@users.noreply.github.com>
* Add levels (#916)
* changes to include discrete levels in colormap if needed
* Change slightly the position of the feature
* Lint
* changes (#920)
* [926][evaluation] weatherGen reader for evaluation package (#927)
* weatherGen reader for evaluation package
* ruffed
* [939] Fix CI (#940)
* changes
* changes
* Implement forecast activity metrics (#892)
* Add forecast activity calculations and update fstep handling in utils
* Add forecast rate of change metrics (froct, troct) to score calculations
* update description
* add next data to verified data
* move cases for kwargs to score
* refactor froct and troct to use calc_change_rate
* remove metric specific kwargs in calc_scores_per_stream
* calc_change_rate now gives NaN array when next step is None
* fix nans
---------
Co-authored-by: Julian Kuehnert
Co-authored-by: Ilaria Luise
* added IFS-FESOM streams and updated all stac files using jsonnet (#934)
* added IFS-FESOM streams and updated all stac files using jsonnet
* changes according to comments by Ilaria
* resolved by using providers from common.jsonnet file
* changed reference to ecmwf and develop branch
---------
Co-authored-by: Patnala,Ankit
* [939] Catches failures of labeling CI job (#950)
* changes
* more permissions
* more permissions
* [880] Informative type checks in the CI (#915)
* attempt
* fixing pyrefly
* changes
* changes
* changes
* Sets dropout rate to 0 in eval mode for flash_attn (#923)
* added check for train/eval for setting dropout_p value
* ruff
* rm ceil, conj, floor, and matmul from annotations.json (#951)
  Co-authored-by: Javad Kasravi
* Add the calculation of 10ff for ERA5 and CERRA (#914)
* Add the calculation of 10ff
* Caring for cases where 10ff cannot be calculated
* Create a new script for derived_channels, minor changes to reader_io
* Remove stream specific settings add regex
* Add more datetime formats (#962)
* fix error when global_plotting_opt does not exist (#964)
* fix error when global_plotting_opt does not exist
* fix linter
* changes (#1009)
* Revert "changes (#1009)" (#1012)
  This reverts commit 2af1c09a11e6dd027d247b670737bbac0cd1a766.
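The 10ff channel derived in #914 above is, in the usual meteorological naming, the 10 m wind speed: the magnitude of the (10u, 10v) wind-component vector. A minimal sketch of the derivation (function name illustrative; per the #914 bullets, streams lacking either component must be skipped):

    import numpy as np

    def derive_10ff(u10: np.ndarray, v10: np.ndarray) -> np.ndarray:
        """Derived 10 m wind-speed channel: sqrt(u10**2 + v10**2),
        computed with np.hypot for overflow safety."""
        return np.hypot(u10, v10)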
* sorcha/dev/500 (#1001)
* lint reformatting + fixing get_channels
* debug messages
* [datasets] move to new cerra and new era5 (#995)
* move to new cerra and new era5
* fix cerra
* Removed the method freeze_weights_forecast and all forecast_freeze_model flag occurrences (#924)
* Add Coordinate System Conversion to DataReaderFesom (#1024)
* Add coordinates conversion
* Ruff
* Add check for longitude
* Sophiex/dev/fsdp2 fix (#959)
* Save current state
* Save current state
* Barebone FSDP2 prototype
  TODO save checkpoints
* First version of saving model
* Fix save_model
* Log everything and log to files
* Remove redundant path creation
* Allow for both slurm and torchrun + fewer log files
* Cleaning up init_ddp
* Ruff
* Attempt to avoid duplicate logging
* FSDP2 with mixed precision policy
* Ruff
* Clean up and logging
* Try to get loggers to behave as we want
* Makes ruff unhappy but works
* Fixed ruff issue
* Fixed problems with multi-node training.
* Fix for interactive/non-DDP runs
* No idea why, but this seems to work so far
  Committing simply so it is saved, obviously needs cleanup
* Still works! So which is it memory or the grad scaler?
* Also still works, I now strongly suspect the amp.gradscaler
* This still works, I have no clue anymore why but whatever it works now....
* Enable loading model from absolute paths
* Enable loading for 1 GPU only
* Fix 1 GPU train continue
* Appease ruff
* Fix saving the model more regularly and perf logging
* Fixed problem when training with 2 nodes.
* Fix data loader seed
* Appease ruff
* Shouldn't overwrite with_fsdp like this
* Potential fix for FSDP2 issue with different ranks using different model parts
* Fix loss scaling and logging of dummy data loss
* Clean up
* Appease ruff
* Fixed problem when source channels are empty (i.e. with diagnostic trainings).
* Update io.py
* FSDP2 suggestions from Tim (#1015)
* comments
* sophie's comments
* removed logger suggestions
* Clean up deadcode etc
* Removing unused imports that the linter didn't like
---------
Co-authored-by: Christian Lessig
Co-authored-by: Tim Hunter
* Ensure sample coordinate is repeated along ipoint for single sample cases in WeatherGenReader (#1026)
  Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com>
* Fix change rate calculation by aligning s1 with s0 (#1007)
* Fix change rate calculation by aligning s1 with s0
* Refactor score calculation to remove unnecessary alignment and add sorting function for coordinates
* use .values option
* Optimize pos enc harmonic (#1033)
* add device & dtype
* ruffed
---------
Co-authored-by: Javad Kasravi
* [evaluation] fix score computation with empty cerra samples (#1039)
* fix samples
* ruffed
* answer comments
* ruffed
* Add score cards plotting feature (#1041)
* add the feature of score_cards
* Refactor, fix error when samples are different for each run, linting
* fix bug, fix sizes when skill difference is huge
* linting
* changes on comments
* linting
* Clessig/develop/fix inference 1049 (#1053)
* add device & dtype
* ruffed
* Fix inference
---------
Co-authored-by: Javad Kasravi
* Channel weighting in loss computation (#753)
* introducing channel weights
* tested channel weighting
* adding target_channel_weights to data_reader_base
* uncomment target channel parsing in anemoi dataset
* remove channel weights from default stream config
* Adds default config for run_evaluation (#1028)
* adding default config + changing yml path locations
* linter checks
* linter checks
* revert .yml file
* updates
---------
Co-authored-by: iluise <72020169+iluise@users.noreply.github.com>
* Fix CERRA eval breaking with coord sorting in `froct` (#1057)
* Move coord sorting inside score function to be metric-specific
* Linting: Removed unused import
* Return nan data array to prevent crash
* fix nans shape in calc_change_rate
---------
Co-authored-by: ilaria luise
* [1059][eval] Fix eval crash of inference models from other HPCs (#1060)
* Read model path from private repo instead of inference config
* Linting: Organized imports
* Interface improvements
* [1022] Getting WG to work on santis (#1023)
* working pytorch
* changes
* Fix for code to work on Alps-Santis
* changes
* cleanups
* changes
* reverting change
* having issues with the latest branch on santis
* changes
* changes
* changes
* override with cpu
* working for cpu
* flash-attn moved to gpu
* remove constraint
* simplifying
* trying
* working on atos
* changes
* macos
* changes
* cleanups
* actions
* actions
* actions
* actions
* changes
---------
Co-authored-by: Christian Lessig
* fix crash in case of missing streams (#1058)
* fix issue with empty region
* fix non existing stream
* fix channel order in evaluation (#1066)
* New templates for issues (#1017)
* changes
* changes
* Revert "New templates for issues (#1017)" (#1071)
  This reverts commit 3a6e7b826b7b29a6df4af27b6771567474302fb3.
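Two of the loss-weighting schemes merged above, the latitude/location weights of #705 and the channel weights of #753, compose multiplicatively on the per-point error. An illustrative sketch assuming degree latitudes and mean-normalized weights; the actual LossCalculator differs:

    import torch

    def weighted_mse(err2: torch.Tensor,
                     channel_weights: torch.Tensor,
                     lats: torch.Tensor) -> torch.Tensor:
        """Sketch: err2 has shape (npoints, nchannels); weights are
        normalized to mean 1 so the overall loss scale is unchanged."""
        # Latitude weighting: area-proportional cos(lat) per point (#705).
        w_loc = torch.cos(torch.deg2rad(lats))
        w_loc = w_loc / w_loc.mean()
        # Per-channel weighting from the stream config (#753).
        w_ch = channel_weights / channel_weights.mean()
        return (err2 * w_loc[:, None] * w_ch[None, :]).mean()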
* [1002] Template for issues try 2 (#1072) * changes * changes * issue with template * updates * changes * issue * issue * [1073][model] Adds latent noise imputation (#1074) * Add latent noise imputation to model.py with backwards compatibility * Linted * Resetting default_config, except for new flag * Resetting default_config 2nd try * add modules to annotations.json (#1035) Co-authored-by: Javad Kasravi * Jk/develop/gamma decay (#998) * Update to develop, prepare for new experiment series * gamma decay over fsteps first commit * add gamma decay factor to config * working gamma decay weighting * rm breakpoint * rm eval and plot configs * reverting default config --------- Co-authored-by: Matthias Karlbauer Co-authored-by: Julian Kuehnert * Add materialisation of new modules before loading checkpoint (#1030) * Add materialisation of new modules before loading checkpoint * Initialize new modules in load_model * Fix adding new embedding networks * Clessig/develop/fix kcrps 1077 (#1078) * Improved robustness for loss fcts where ch loss does not make sense * Re-enabled kernel CRPS and added weighting options * Fixes * Improved tensor reordering * Sgrasse/develop/issue 898 checkpoint freq conf (#905) * add new/changed parameters in default_config * implement backward compatibility * remove `train_log.log_interval` from default config * use new configuration arguments in Trainer * fix: wrong variable name * ruffed * Rework method structure * fix bug * rename `log_intevals` to `train_log_freq` * fix integration tests * fix forgot renaming * fix rebasing artifact * Sorcha/dev/571 (#957) * debug for netcdf pipeline * zarr_netcdf first draft * fixing pipeline * linter checks * removing debug prints from io.py * refactoring, found issue with forecast_ref_time * deleting unnecessary lines * proper docstrings * moving filepaths * linting * multithread processing added * debug info * debugging * refactoring * linting * fstep as argument * change assert --------- Co-authored-by: owens1 Co-authored-by: iluise <72020169+iluise@users.noreply.github.com> Co-authored-by: ilaria luise * pyproject.toml checks (#1042) * adding checks for toml files into actions * lint fixes * lint checks * lint fixes * changes * disabling ruff check * change to path info instead * adding E501 and E721 to be ignored for now --------- Co-authored-by: Tim Hunter * Implement EMA of the model (#1005) * Save current state * Save current state * Barebone FSDP2 prototype TODO save checkpoints * First version of saving model * Fix save_model * Log everything and log to files * Remove redundant path creation * Allow for both slurm and torchrun + fewer log files * Cleaning up init_ddp * Ruff * Attempt to avoid duplicate logging * FSDP2 with mixed precision policy * Ruff * Clean up and logging * Try to get loggers to behave as we want * Makes ruff unhappy but works * Fixed ruff issue * Fixed problems with multi-node training. * Fix for interactive/non-DDP runs * No idea why, but this seems to work so far. Committing simply so it is saved, obviously needs cleanup * Still works! So which is it, memory or the grad scaler? * Also still works, I now strongly suspect the amp.gradscaler * This still works, I have no clue anymore why, but whatever, it works now.... * Enable loading model from absolute paths * Enable loading for 1 GPU only * Fix 1 GPU train continue * Appease ruff * Fix saving the model more regularly and perf logging * Fixed problem when training with 2 nodes.
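"Jk/develop/gamma decay" (#998) introduces a decay factor over forecast steps, so later steps contribute less to the training loss. A hedged sketch of such a weighting (the normalisation and default gamma are assumptions, not the repository's settings):

```python
import torch

def fstep_weights(num_fsteps: int, gamma: float = 0.9) -> torch.Tensor:
    """Weight forecast step f by gamma**f, normalised to sum to one."""
    w = gamma ** torch.arange(num_fsteps, dtype=torch.float32)
    return w / w.sum()

# usage: loss = (fstep_weights(len(step_losses)) * torch.stack(step_losses)).sum()
```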
* Fix data loader seed * Appease ruff * Shouldn't overwrite with_fsdp like this * Potential fix for FSDP2 issue with different ranks using different model parts * Fix loss scaling and logging of dummy data loss * Clean up * Appease ruff * Start implementing EMA, works for 1 GPU * Make EMA model multi-gpu compatible * Fix linting issues * Address comments on PR * Enforce the ema model to strictly be the same as model. Note this likely needs to be changed for student-teacher training * Rename variable --------- Co-authored-by: Christian Lessig * Sophiex/dev/ema fix inference (#1094) * Save current state * Save current state * Barebone FSDP2 prototype TODO save checkpoints * First version of saving model * Fix save_model * Log everything and log to files * Remove redundant path creation * Allow for both slurm and torchrun + fewer log files * Cleaning up init_ddp * Ruff * Attempt to avoid duplicate logging * FSDP2 with mixed precision policy * Ruff * Clean up and logging * Try to get loggers to behave as we want * Makes ruff unhappy but works * Fixed ruff issue * Fixed problems with multi-node training. * Fix for interactive/non-DDP runs * No idea why, but this seems to work so far. Committing simply so it is saved, obviously needs cleanup * Still works! So which is it, memory or the grad scaler? * Also still works, I now strongly suspect the amp.gradscaler * This still works, I have no clue anymore why, but whatever, it works now.... * Enable loading model from absolute paths * Enable loading for 1 GPU only * Fix 1 GPU train continue * Appease ruff * Fix saving the model more regularly and perf logging * Fixed problem when training with 2 nodes. * Fix data loader seed * Appease ruff * Shouldn't overwrite with_fsdp like this * Potential fix for FSDP2 issue with different ranks using different model parts * Fix loss scaling and logging of dummy data loss * Clean up * Appease ruff * Start implementing EMA, works for 1 GPU * Make EMA model multi-gpu compatible * Fix linting issues * Address comments on PR * Enforce the ema model to strictly be the same as model. Note this likely needs to be changed for student-teacher training * Rename variable * Fix inference * One more thing * Fixed linting * Fix problem with checkpoint saving when validate_with_ema is false or not specified --------- Co-authored-by: Christian Lessig * Add bar plots (#1051) * add bar plots feature Rebasing * change name of bar plotter to be explicit * small fixes related to the correct color allocation and mean calculation Rebase in plotter * add bar_plot_metric_region function * linting * Rebase descriptions * fix bug in config * add average function, have bar plots as subfigures, fix channel bug * Linting * add descriptions * Linting * Fix semicolon issue (#1099) * remove semicolon from valid_time * Linting * add the change also for histograms * Linting * update fix with %Y-%m-%dT%H%M format, Linting * remove unnecessary str * Implement loading climatology in evaluate for ACC (#642) * load clim in utils.py * Update path * Add mock implementation for climatology data retrieval in get_clim function by copying target.
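"Implement EMA of the model" (#1005) and its inference fix (#1094) maintain an exponential moving average of the weights for validation and inference. A minimal single-process sketch (the decay value is an assumption; the FSDP2/DTensor case needs more care, as the commits note):

```python
import copy
import torch

class ModelEMA:
    def __init__(self, model: torch.nn.Module, decay: float = 0.999):
        self.decay = decay
        self.ema = copy.deepcopy(model).eval()
        for p in self.ema.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model: torch.nn.Module) -> None:
        # e = decay * e + (1 - decay) * p, applied parameter-wise
        for e, p in zip(self.ema.parameters(), model.parameters()):
            e.lerp_(p.detach(), 1.0 - self.decay)
```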
* Refactor score calculation to use mock climatology data from get_clim function if acc is in metrics * add some logging to develop and test alignment function * Refactor get_clim * replace get_clim with align_clim_data * Add climatology time matching utility and refactor align_clim_data function * add to-dos and switch to initially filling nans * Ugly workaround for ACC calculation to support grouping * Refactor ACC calculation and get path to clim from config * replace assume matching argument by try and except * Remove unnecessary grouping and averaging logic from ACC calculation * implement coordinate matching * improve metrics kwargs and handling of case when climatology data is missing * move align clim data to clim utils * refactor: align_clim_data now called when climatology path is provided independent of acc in metrics * fix logging * ruff * correct coord conversion formula * support data_path_aux in wegen-private with climatology_filename in stream config * Adapt to new reader class * ruff * fix imports * Refactor VerifiedData to include optional climatology and update score calculation logic * ruff * move lengthy climatology retrieval to clim_utils * ruff * cache clim indices * ruff * fix align_clim_data and implement faster build_climatology_indexer * move get clim path to reader * ruff * add comments for possible future todos * fix zero encountered in divide warning * unify _calc_acc_group --------- Co-authored-by: iluise * handle ensemble members in FastEvaluation (#1105) * rebase * add ensemble * fix deterministic * fix plotting * lint * fix eval_config * Fix adalayernorm conditioning in forecast engine (#1121) * pass fstep instead of fe_block idx as aux_info * update forecast docstring * Fix spoofing and refactor handling of multiple source files (#1118) * Cleaning up spoofing and related code on data preprocessing for model * Fixed typo * Updated comments * Removed merge cells and implemented necessary adjustments * Fixed forecasting * Fixed missing handling of NaNs in coordinates and channel data * Minor clean up * Fix to removing/renaming variables * Changed function name to improve readability * Fixed bug with incorrect handling of multiple input datasources.
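The climatology work above exists to support the anomaly correlation coefficient (ACC): forecast and target are both turned into anomalies against an aligned climatology before correlating. A sketch of the metric itself (axis handling and NaN treatment are illustrative, not the evaluate package's exact code):

```python
import numpy as np

def acc(pred: np.ndarray, target: np.ndarray, clim: np.ndarray) -> float:
    fa, oa = pred - clim, target - clim  # anomalies w.r.t. climatology
    num = np.nansum(fa * oa)
    den = np.sqrt(np.nansum(fa ** 2) * np.nansum(oa ** 2))
    return float(num / den) if den > 0 else float("nan")
```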
* Addressed reviewer comments * Introduce feature to have forecast steps and samples to be given as str (#1130) * Add the feature of str reading for fsteps and samples * Linting * Introduce assertion and linting * remove breakpoint :( * linting again * [1131] fixes circular dependencies (#1134) * fixes dependencies * cleanup * make the type checker not fail * cleanup * cleanup of type issues * Give option to plot only prediction maps (#1139) * add plot_preds_only feature * minor changes after comments * Tell FSDP2 about embedding engine forward functions (#1133) * Tell FSDP2 about embedding engine forward functions Note DO NOT add print functions in forward functions of the model, it will break with FSDP2 * Add comment * recover 'all' option (#1146) * Fixed problem in inference (#1145) * implement vrmse (#1147) * [1144] Extra fixes (#1148) * Fixed problem in inference * more fixes * fixes * lint * lint --------- Co-authored-by: Christian Lessig * Jk/log grad norms/log grad norms (#1068) * Log gradient norms * Prototype for recording grad norms * Address review changes + hide behind feature flag * Final fixes including backward compatibility * Ruff * More ruff stuff * forecast config with small decoder * fixed uv.lock * test gradient logging on multi gpus * update uv.lock to latest develop version * revert to default config * add comment on FSDP2 specifics * move plot grad script to private repo * rm seaborn from pyproject * updating terminal and metrics logging, add get_tensor_item fct * check for DTensor instead of world size * revert forecast fct, fix in separate PR * rename grad_norm log names to exclude from MLFlow * add log_grad_norms to default config --------- Co-authored-by: sophiex <24638638+sophie-xhonneux@users.noreply.github.com> * Add forecast and observation activity (#1126) * Add calculation methods for forecast and observation activity metrics in Scores class * Add new calculation methods for forecast activity metrics in Scores class * ruff * fix func name * Rename observation activity calculation method to target activity in Scores class * typo * refactor to common calc_act function for activity * fix cases * have calc_tact and calc_fact that use _calc_act for maintainability * fix small thing in style --------- Co-authored-by: iluise * hotfix: use correct method `create` instead of `construct` (#1090) * remove the issues (#1166) * Fixed problem with diagnostic datasets (#1165) * Fix NaN issue in Zarr to NetCDF conversion for Gaussian grids (#1140) * Fix NaN issue in Zarr to NetCDF conversion for Gaussian grids - Add automatic grid type detection (regular vs Gaussian) - Preserve Gaussian grids using CF auxiliary coordinates (ncells dimension) The converter now properly handles reduced Gaussian grids by keeping them as unstructured grids with CF auxiliary lat/lon coordinates, following CF-1.12 conventions. * Fix NaN issue in Zarr to NetCDF conversion for Gaussian grids - Add automatic grid type detection (regular vs Gaussian) - Preserve Gaussian grids using CF auxiliary coordinates (ncells dimension) The converter now properly handles reduced Gaussian grids by keeping them as unstructured grids with CF auxiliary lat/lon coordinates, following CF-1.12 conventions. * fixing fstep issues * adding aux coords * lint + removing comments * rollback cf_parser * issue w/ forecast_step --------- Co-authored-by: Dr.
Jehangir Awan Co-authored-by: Sorcha Co-authored-by: Sorcha Owens <73587207+enssow@users.noreply.github.com> Co-authored-by: iluise <72020169+iluise@users.noreply.github.com> * [1037] Cleanup engines (torch.nn.modules and forward functions) (#1080) * Implement forward method for StreamEmbedTransformer * Implemented forward pass for EmbeddingEngine and refactor model accordingly * removed line that shouldn't be there * Minor fixes * First step check create function and put everything in init * Implemented forward pass for LocalAssimilationEngine and refactor model accordingly * Cleanup and minor fixes * Implemented forward pass for Local2GlobalAssimilationEngine and refactor model accordingly * Cleanup model * Implemented forward pass for GlobalAssimilationEngine and refactor model accordingly * Implemented forward pass for ForecastingEngine and refactor model accordingly * Fixed sharding when module is called * Update parameter names for backward compatibility in Model * Changed to old code * Add function to provide backwards compatibility when loading old checkpoints * Minor fix * Use backward compatibility when loading old checkpoints * Comment in renaming state dict * Formatting lint --------- Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 * Fix out of bounds in data_reader_obs (#1180) * fix out of bounds access * Adding comment * Removed debug * Fixed to use forward function for forecast engine (#1188) * Fixed to use forward function for forecast engine, and also fstep for conditioning * Fixed missing return statement * Enable FesomDataReader to have different source and target datasets (#1046) * Implement separate target and source files, adjust masking * Fix causal masking * Fix longitude conversion flag * Fix causal masking strategy --------- Co-authored-by: Seb Hickman <56727418+shmh40@users.noreply.github.com> * Add support for constant learning rate (#1186) * Added support for constant learning rate and minor clean-up in code * Fixed issues with overlap between lr phases * Changing default lr to constant * [issue 1123] restore probabilistic scores (#1128) * rebase * add ensemble * fix deterministic * fix plotting * lint * fix eval_config * probabilistic scores working now * lint * Fix spoofing and refactor handling of multiple source files (#1118) * Cleaning up spoofing and related code on data preprocessing for model * Fixed typo * Updated comments * Removed merge cells and implemented necessary adjustments * Fixed forecasting * Fixed missing handling of NaNs in coordinates and channel data * Minor clean up * Fix to removing/renaming variables * Changed function name to improve readability * Fixed bug with incorrect handling of multiple input datasources.
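"Add support for constant learning rate" (#1186) adds a flat phase to the scheduler. Roughly (warmup length and shape are assumptions, not the repository's defaults):

```python
def lr_at_step(step: int, base_lr: float, warmup_steps: int = 1000) -> float:
    """Linear warmup followed by a constant learning rate."""
    if step < warmup_steps:
        return base_lr * (step + 1) / warmup_steps
    return base_lr
```

The commit about "overlap between lr phases" suggests the care needed at the boundary: the warmup endpoint and the constant phase must agree at step == warmup_steps.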
* Addressed reviewer comments * resolve conflict * [1131] fixes circular dependencies (#1134) * fixes dependencies * cleanup * make the type checker not fail * cleanup * cleanup of type issues * Give option to plot only prediction maps (#1139) * add plot_preds_only feature * minor changes after comments * Tell FSDP2 about embedding engine forward functions (#1133) * Tell FSDP2 about embedding engine forward functions Note DO NOT add print functions in forward functions of the model, it will break with FSDP2 * Add comment * recover 'all' option (#1146) * Fixed problem in inference (#1145) * implement vrmse (#1147) * [1144] Extra fixes (#1148) * Fixed problem in inference * more fixes * fixes * lint * lint --------- Co-authored-by: Christian Lessig * Jk/log grad norms/log grad norms (#1068) * Log gradient norms * Prototype for recording grad norms * Address review changes + hide behind feature flag * Final fixes including backward compatibility * Ruff * More ruff stuff * forecast config with small decoder * fixed uv.lock * test gradient logging on multi gpus * update uv.lock to latest develop version * revert to default config * add comment on FSDP2 specifics * move plot grad script to private repo * rm seaborn from pyproject * updating terminal and metrics logging, add get_tensor_item fct * check for DTensor instead of world size * revert forecast fct, fix in separate PR * rename grad_norm log names to exclude from MLFlow * add log_grad_norms to default config --------- Co-authored-by: sophiex <24638638+sophie-xhonneux@users.noreply.github.com> * Add forecast and observation activity (#1126) * Add calculation methods for forecast and observation activity metrics in Scores class * Add new calculation methods for forecast activity metrics in Scores class * ruff * fix func name * Rename observation activity calculation method to target activity in Scores class * typo * refactor to common calc_act function for activity * fix cases * have calc_tact and calc_fact that use _calc_act for maintainability * fix small thing in style --------- Co-authored-by: iluise * hotfix: use correct method `create` instead of `construct` (#1090) * restore develop * fix deterministic * fix plotting * lint * fix eval_config * probabilistic scores working now * lint * update utils * packages/evaluate/src/weathergen/evaluate/score.py * lint * removing duplication --------- Co-authored-by: Christian Lessig Co-authored-by: Timothy Hunter Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com> Co-authored-by: Sophie X <24638638+sophie-xhonneux@users.noreply.github.com> Co-authored-by: Julius Polz <56866670+jpolz@users.noreply.github.com> Co-authored-by: Julian Kuehnert Co-authored-by: Simon Grasse <161459968+grassesi@users.noreply.github.com> * Adding config to issue templates The issue template seems to have disappeared, attempting to solve that.
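The kernel-CRPS and "restore probabilistic scores" commits both revolve around the ensemble CRPS estimator CRPS ≈ E|X−y| − ½·E|X−X′|. A sketch (shapes are illustrative; the "fair" variant would divide the spread term by m(m−1) rather than m²):

```python
import numpy as np

def kernel_crps(ens: np.ndarray, obs: np.ndarray) -> np.ndarray:
    # ens: (n_members, ...), obs: (...)
    skill = np.abs(ens - obs[None]).mean(axis=0)                 # E|X - y|
    spread = np.abs(ens[:, None] - ens[None, :]).mean(axis=(0, 1))  # E|X - X'|
    return skill - 0.5 * spread
```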
* Add the duration of animation as global plotting option (#1189) * Add the animation duration as global plotting option * Linting * Use FPS instead of milliseconds * Linting * Attempt to fix the bug report template * Attempt to fix initiative template * Update task template * [1081][Evaluation] Use parent ruff rules (#1177) * use ruff settings from parent * fix code checks * check fixes 2nd round * reformat to line length * [1092] Adds pushing metrics to the evaluation pipeline (#1127) * changes * changes * changes * changes * changes * scores successfully pushed to MLFlow, still need to refactor * try to batch upload all metrics from same runid * batch logging all scores of each run_id * get parent_run by from_run_id * changes * cleanups * bug fixes * typing issue * Cleanup * pdb * integration test --------- Co-authored-by: Jubeku * Fix the issue - "Empty source still have embedding network" (#1114) * Replace cf.rank==0 with utils.distributed.is_root * fix empty source inputs still have embedding layer * fix lint * fix source empty or source exclude all * fix source empty or source exclude all * fix forecast mode empty source --------- Co-authored-by: wang85 Co-authored-by: wang85 Co-authored-by: wang85 Co-authored-by: wang85 * [930][evaluation] implement CSVReader (#932) * first version of quaver reader * working version * add CSVReader * rebase to develop * add polymorphism * fix names * lint * Iluise/hot fixes (#1209) * fix froct * fix 1150 * Fix plot_train verbosity (#1225) * [1206] Experimentation for extra data readers (#1207) * initial implementation * changes * toml * add module to annotations.json (#1142) Co-authored-by: Javad Kasravi * Correct bug with score cards and bar plots for different metrics (#1192) * Rebase to develop * Linting * Address comments and linting * [eval][1122] Plot scores on a map (#1176) * first version of score maps * add maps to compute_scores * fix single sample situation * fix single sample * lint * restore score.py * fix bug in metric stream * default flag to false * Minor correction, a line was deleted by mistake?
(#1193) * fix * working setup for regridded data * fix missing valid time case * lint and fix color in score cards * fix path for score maps * Allow plotting score maps every time --------- Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com> * Fix DDP without FSDP (#1227) * Fix DDP without FSDP * Fixed that freezing would not have worked with only DDP * refactor export scripts - Part 1 (#1223) * move stuff around * move stuff around * rename files * [1034][reader_extra] E-Obs datareader (#1228) * [1034] rebase * [1034] add dataloader * [1034] Zarr3-->Zarr2 * [1034] lint * [1034] lint * [1034] Moved to reader_extra * [1034] registry E-Obs * [929][evaluation/io] Make lead time and source window available to evaluation via OutputDataset (#1087) * store lead time in OutputDataset and make it available to evaluation * ruffed * addressed comments * remove lead_time_hrs from OutputDataset * use type alias for union of `zarr.Array` and `NDArray` * dataclass to store time range information * add source interval to `OutputDataset` * store lead-time and source interval in xarray * correct de(serialization) for `OutputDataset` with source-interval * correctly instantiate `OutputDataset` from `OutputBatchData` * remove attribute `t_window_len_hours` from `OutputBatchData` * calculate for output source windows from sample indices * add source_intervals as attribute to OutputBatchData * ruffed * fix: pass source intervals to OutputBatchData * fix: use correct string in array.astype * fix: (de)serialize np.datetime64 * fix deserialization of OutputDataset from json/zarr * fix: errors in xarray conversion * fix types * Improve documentation * remove lead_time from OutputDataset * implement with_target function for ItemKey class * handle potentially missing target/prediction data in OutputItem * separate extraction of targets/predictions into method * include fstep 0 when forecast offset is 1 * Infer forecast offset in ZarrIO * pass forecast offset to OutputItem * use forecast_offset in OutputItem * exclude fsteps without targets from iteration * ruffed * make forecast offset property instead of attribute * raise meaningful error if ZarrIO is empty * fix: pass key instead of zarr.group * ruffed * Rename epoch to mini_epoch (#1190) * training progress unit realignment from epoch to mini_epoch * small naming fix of mini_epoch * ruffed * linted * Fix out of bounds in data_reader_obs (#1180) * fix out of bounds access * Adding comment * Removed debug * Fixed to use forward function for forecast engine (#1188) * Fixed to use forward function for forecast engine, and also fstep for conditioning * Fixed missing return statement * Enable FesomDataReader to have different source and target datasets (#1046) * Implement separate target and source files, adjust masking * Fix causal masking * Fix longitude conversion flag * Fix causal masking strategy --------- Co-authored-by: Seb Hickman <56727418+shmh40@users.noreply.github.com> * Add support for constant learning rate (#1186) * Added support for constant learning rate and minor clean-up in code * Fixed issues with overlap between lr phases * Changing default lr to constant * [issue 1123] restore probabilistic scores (#1128) * rebase * add ensemble * fix deterministic * fix plotting * lint * fix eval_config * probabilistic scores working now * lint * Fix spoofing and refactor handling of multiple source files (#1118) * Cleaning up spoofing and related code on data preprocessing for model * Fixed typo * Updated comments * Removed merge cells
and implemented necessary adjustments * Fixed forecasting * Fixed missing handling of NaNs in coordinates and channel data * Minor clean up * Fix to removing/renaming variables * Changed function name to improve readability * Fixed bug with incorrect handling of multiple input datasources. * Addressed reviewer comments * resolve conflict * [1131] fixes circular dependencies (#1134) * fixes dependencies * cleanup * make the type checker not fail * cleanup * cleanup of type issues * Give option to plot only prediction maps (#1139) * add plot_preds_only feature * minor changes after comments * Tell FSDP2 about embedding engine forward functions (#1133) * Tell FSDP2 about embedding engine forward functions Note DO NOT add print functions in forward functions of the model, it will break with FSDP2 * Add comment * recover 'all' option (#1146) * Fixed problem in inference (#1145) * implement vrmse (#1147) * [1144] Extra fixes (#1148) * Fixed problem in inference * more fixes * fixes * lint * lint --------- Co-authored-by: Christian Lessig * Jk/log grad norms/log grad norms (#1068) * Log gradient norms * Prototype for recording grad norms * Address review changes + hide behind feature flag * Final fixes including backward compatibility * Ruff * More ruff stuff * forecast config with small decoder * fixed uv.lock * test gradient logging on multi gpus * update uv.lock to latest develop version * revert to default config * add comment on FSDP2 specifics * move plot grad script to private repo * rm seaborn from pyproject * updating terminal and metrics logging, add get_tensor_item fct * check for DTensor instead of world size * revert forecast fct, fix in separate PR * rename grad_norm log names to exclude from MLFlow * add log_grad_norms to default config --------- Co-authored-by: sophiex <24638638+sophie-xhonneux@users.noreply.github.com> * Add forecast and observation activity (#1126) * Add calculation methods for forecast and observation activity metrics in Scores class * Add new calculation methods for forecast activity metrics in Scores class * ruff * fix func name * Rename observation activity calculation method to target activity in Scores class * typo * refactor to common calc_act function for activity * fix cases * have calc_tact and calc_fact that use _calc_act for maintainability * fix small thing in style --------- Co-authored-by: iluise * hotfix: use correct method `create` instead of `construct` (#1090) * restore develop * fix deterministic * fix plotting * lint * fix eval_config * probabilistic scores working now * lint * update utils * packages/evaluate/src/weathergen/evaluate/score.py * lint * removing duplication --------- Co-authored-by: Christian Lessig Co-authored-by: Timothy Hunter Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com> Co-authored-by: Sophie X <24638638+sophie-xhonneux@users.noreply.github.com> Co-authored-by: Julius Polz <56866670+jpolz@users.noreply.github.com> Co-authored-by: Julian Kuehnert Co-authored-by: Simon Grasse <161459968+grassesi@users.noreply.github.com> * Adding config to issue templates The issue template seems to have disappeared, attempting to solve that.
* Add the duration of animation as global plotting option (#1189) * Add the animation duration as global plotting option * Linting * Use FPS instead of milliseconds * Linting * Attempt to fix the bug report template * Attempt to fix initiative template * Update task template * [1081][Evaluation] Use parent ruff rules (#1177) * use ruff settings from parent * fix code checks * check fixes 2nd round * reformat to line length * [1092] Adds pushing metrics to the evaluation pipeline (#1127) * changes * changes * changes * changes * changes * scores successfully pushed to MLFlow, still need to refactor * try to batch upload all metrics from same runid * batch logging all scores of each run_id * get parent_run by from_run_id * changes * cleanups * bug fixes * typing issue * Cleanup * pdb * integration test --------- Co-authored-by: Jubeku * Fix the issue - "Empty source still have embedding network" (#1114) * Replace cf.rank==0 with utils.distributed.is_root * fix empty source inputs still have embedding layer * fix lint * fix source empty or source exclude all * fix source empty or source exclude all * fix forecast mode empty source --------- Co-authored-by: wang85 Co-authored-by: wang85 Co-authored-by: wang85 Co-authored-by: wang85 * [930][evaluation] implement CSVReader (#932) * first version of quaver reader * working version * add CSVReader * rebase to develop * add polymorphism * fix names * lint * Iluise/hot fixes (#1209) * fix froct * fix 1150 * Fix plot_train verbosity (#1225) * [1206] Experimentation for extra data readers (#1207) * initial implementation * changes * toml * add module to annotations.json (#1142) Co-authored-by: Javad Kasravi * Correct bug with score cards and bar plots for different metrics (#1192) * Rebase to develop * Linting * Address comments and linting * [eval][1122] Plot scores on a map (#1176) * first version of score maps * add maps to compute_scores * fix single sample situation * fix single sample * lint * restore score.py * fix bug in metric stream * default flag to false * Minor correction, a line was deleted by mistake?
(#1193) * fix * working setup for regridded data * fix missing valid time case * lint and fix color in score cards * fix path for score maps * Allow plotting score maps every time --------- Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com> * Fix DDP without FSDP (#1227) * Fix DDP without FSDP * Fixed that freezing would not have worked with only DDP * refactor export scripts - Part 1 (#1223) * move stuff around * move stuff around * rename files * [1034][reader_extra] E-Obs datareader (#1228) * [1034] rebase * [1034] add dataloader * [1034] Zarr3-->Zarr2 * [1034] lint * [1034] lint * [1034] Moved to reader_extra * [1034] registry E-Obs * training progress unit realignment from epoch to mini_epoch * ruffed * check if path is dir in io_reader * fix overwrite of fname_zarr in io_reader * add backward compatibility to config read * Separate write and read functions for model*chkpt*.json files (MatKBauer) --------- Co-authored-by: Christian Lessig Co-authored-by: Kacper Nowak Co-authored-by: Seb Hickman <56727418+shmh40@users.noreply.github.com> Co-authored-by: iluise <72020169+iluise@users.noreply.github.com> Co-authored-by: Timothy Hunter Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com> Co-authored-by: Sophie X <24638638+sophie-xhonneux@users.noreply.github.com> Co-authored-by: Julius Polz <56866670+jpolz@users.noreply.github.com> Co-authored-by: Julian Kuehnert Co-authored-by: Simon Grasse <161459968+grassesi@users.noreply.github.com> Co-authored-by: Michael Tarnawa <18899420+mtar@users.noreply.github.com> Co-authored-by: Jubeku Co-authored-by: Jifeng Wang Co-authored-by: wang85 Co-authored-by: wang85 Co-authored-by: wang85 Co-authored-by: wang85 Co-authored-by: Javad kasravi Co-authored-by: Javad Kasravi Co-authored-by: Simone Norberti <63310821+simone99n@users.noreply.github.com> * Updated settings for integration test (#1277) * Fix DDP without FSDP * Fixed that freezing would not have worked with only DDP * Fixed issues with train_continue * Config * Fixed inference and cleaned up model loading * Remove load function that was only used in inference. The one in trainer is used now * Reverting to default default_config.yml * Removing old function * Fixed problem when running interactive. * Fixed comments * Updated settings to test that loss is decreasing and that it's robustly met in the integration test. * Reverting files to current develop. * Updated settings * Updating loader_num_workers so that integration test is faster * Remove default for analysis_streams_output argument (#850) * Remove default for analysis_streams_output argument * Rename analysis_streams_output to streams_output. implement fallback to analysis_streams_output for config parsing * ruff * fix for nonexistent analysis_streams_output --------- Co-authored-by: Christian Lessig * [eval] refactor grib/netCDF converters (#1252) * move stuff around * move stuff around * rename files * cf parser class * add quaver_parser * working quaver setup * check netCDF parser * lint * remove path * Fix inference and cleanup model loading (#1275) * Fix DDP without FSDP * Fixed that freezing would not have worked with only DDP * Fixed issues with train_continue * Config * Fixed inference and cleaned up model loading * Remove load function that was only used in inference. The one in trainer is used now * Reverting to default default_config.yml * Removing old function * Fixed problem when running interactive.
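"Fix DDP without FSDP" (#1227) concerns the plain-DDP code path. For reference, the vanilla wrapping looks roughly like this (assumes init_process_group has already run and one GPU per process; not the repository's exact code):

```python
import torch
from torch.nn.parallel import DistributedDataParallel as DDP

def wrap_ddp(model: torch.nn.Module, local_rank: int) -> torch.nn.Module:
    # Move the model to this rank's device, then let DDP all-reduce gradients.
    model = model.to(f"cuda:{local_rank}")
    return DDP(model, device_ids=[local_rank])
```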
* Fixed comments * Fixing incomplete merge * Fixed merging * [1288] remove epoch reference (#1289) * [1256] Update main branch (#1267) * Fix in pyproject dependencies (#5) * Adding LICENSE file. * Adding license header to all files. * Adding .gitignore * Fixed dependencies. * comments --------- Co-authored-by: Christian Lessig --------- Co-authored-by: Kacper Nowak Co-authored-by: Christian Lessig Co-authored-by: Michael Langguth <62009669+mlangguth89@users.noreply.github.com> Co-authored-by: iluise <72020169+iluise@users.noreply.github.com> Co-authored-by: Sindhu-Vasireddy <98752594+Sindhu-Vasireddy@users.noreply.github.com> Co-authored-by: Seb Hickman <56727418+shmh40@users.noreply.github.com> Co-authored-by: Julian Kuehnert Co-authored-by: ankitpatnala Co-authored-by: Patnala,Ankit Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com> Co-authored-by: Christian Lessig Co-authored-by: Till Hauer Co-authored-by: Simon Grasse Co-authored-by: Michael Co-authored-by: Julian Kuehnert Co-authored-by: kctezcan Co-authored-by: Kerem Can Tezcan Co-authored-by: Sorcha Owens <73587207+enssow@users.noreply.github.com> Co-authored-by: owens1 Co-authored-by: Sophie X <24638638+sophie-xhonneux@users.noreply.github.com> Co-authored-by: iluise Co-authored-by: Matthias Karlbauer Co-authored-by: Matthias Karlbauer Co-authored-by: Julius Polz <56866670+jpolz@users.noreply.github.com> Co-authored-by: Jifeng Wang Co-authored-by: wang85 Co-authored-by: th3002s Co-authored-by: Moritz Hauschulz <60788263+moritzhauschulz@users.noreply.github.com> Co-authored-by: Simon Grasse <161459968+grassesi@users.noreply.github.com> Co-authored-by: Sophie Xhonneux Co-authored-by: xhonneux2 Co-authored-by: karlbauer1 Co-authored-by: Christian Lessig Co-authored-by: Sebastian Hickman Co-authored-by: Ilaria Luise Co-authored-by: luise1 Co-authored-by: Javad kasravi Co-authored-by: Javad Kasravi Co-authored-by: Wael Co-authored-by: Julian Kuehnert Co-authored-by: owens1 Co-authored-by: jehangirawan <86337520+jehangirawan@users.noreply.github.com> Co-authored-by: Dr. 
Jehangir Awan Co-authored-by: Sorcha Co-authored-by: yperugachidiaz <54586435+yperugachidiaz@users.noreply.github.com> Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: perugachidiaz1 Co-authored-by: Michael Tarnawa <18899420+mtar@users.noreply.github.com> Co-authored-by: Jubeku Co-authored-by: wang85 Co-authored-by: wang85 Co-authored-by: wang85 Co-authored-by: Simone Norberti <63310821+simone99n@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/bug_report.yml | 39 + .github/ISSUE_TEMPLATE/config.yml | 1 + .github/ISSUE_TEMPLATE/initiative.yml | 57 + .github/ISSUE_TEMPLATE/task.yml | 38 + .github/pull_request_template.md | 34 + .github/workflows/ci.yml | 69 + .github/workflows/issue_assign.yml | 50 + .github/workflows/pr_assign_labels.yml | 74 + .gitignore | 26 +- .python-version | 1 + CODE-of-CONDUCT.md | 62 + CONTRIBUTING.md | 75 + INSTALL.md | 15 + README.md | 31 + assets/weathergenerator_logo.png | Bin 0 -> 83188 bytes assets/weathergenerator_partner.png | Bin 0 -> 308490 bytes config/compare_config_list.yml | 10 + config/default_config.yml | 163 + config/evaluate/config_zarr2cf.yaml | 143 + config/evaluate/eval_config.yml | 80 + config/ifs_fesom_config.yml | 20 + config/mixed.yml | 2 + config/profiling/annotations.json | 793 +++++ config/streams/cerra_seviri/cerra.yml | 32 + config/streams/cerra_seviri/seviri.yml | 32 + config/streams/era5_1deg/era5.yml | 37 + config/streams/era5_nppatms_synop/era5.yml | 38 + .../streams/era5_nppatms_synop/npp_atms.yml | 31 + config/streams/era5_nppatms_synop/synop.yml | 30 + config/streams/fesom/fesom.yml | 36 + config/streams/fesom/fesom_elem.yml | 36 + config/streams/fesom/ifs.yml | 36 + config/streams/icon/icon.yml | 36 + config/streams/igra/igra.yml | 34 + integration_tests/small1.yaml | 19 + integration_tests/small1_test.py | 200 ++ integration_tests/streams/era5_small.yml | 39 + packages/common/pyproject.toml | 100 + .../common/src/weathergen/common/__init__.py | 10 + .../common/src/weathergen/common/config.py | 575 +++ packages/common/src/weathergen/common/io.py | 666 ++++ .../src/weathergen/common/platform_env.py | 38 + packages/evaluate/pyproject.toml | 59 + .../src/weathergen/evaluate/__init__.py | 0 .../src/weathergen/evaluate/clim_utils.py | 226 ++ .../weathergen/evaluate/derived_channels.py | 155 + .../weathergen/evaluate/export/__init__.py | 1 + .../weathergen/evaluate/export/cf_utils.py | 77 + .../weathergen/evaluate/export/export_core.py | 257 ++ .../evaluate/export/export_inference.py | 209 ++ .../weathergen/evaluate/export/io_utils.py | 67 + .../evaluate/export/parser_factory.py | 46 + .../evaluate/export/parsers/netcdf_parser.py | 507 +++ .../evaluate/export/parsers/quaver_parser.py | 246 ++ .../src/weathergen/evaluate/export/reshape.py | 79 + .../src/weathergen/evaluate/io_reader.py | 961 +++++ .../src/weathergen/evaluate/plot_utils.py | 264 ++ .../src/weathergen/evaluate/plotter.py | 1420 ++++++++ .../src/weathergen/evaluate/run_evaluation.py | 230 ++ .../evaluate/src/weathergen/evaluate/score.py | 1498 ++++++++ .../src/weathergen/evaluate/score_utils.py | 134 + .../evaluate/src/weathergen/evaluate/utils.py | 647 ++++ packages/metrics/pyproject.toml | 102 + .../src/weathergen/metrics/__init__.py | 0 .../src/weathergen/metrics/mlflow_utils.py | 176 + packages/readers_extra/pyproject.toml | 106 + .../src/weathergen/readers_extra/__init__.py | 7 + 
.../readers_extra/data_reader_eobs.py | 415 +++ .../readers_extra/data_reader_icon.py | 530 +++ .../src/weathergen/readers_extra/registry.py | 28 + pyproject.toml | 248 +- scripts/actions.sh | 152 + scripts/check_gh_issue.py | 62 + scripts/check_tomls.py | 65 + src/weathergen/__init__.py | 204 -- src/weathergen/datasets/anemoi_dataset.py | 63 - src/weathergen/datasets/batchifyer.py | 270 -- src/weathergen/datasets/data_reader_anemoi.py | 270 ++ src/weathergen/datasets/data_reader_base.py | 761 ++++ src/weathergen/datasets/data_reader_fesom.py | 648 ++++ src/weathergen/datasets/data_reader_obs.py | 257 ++ src/weathergen/datasets/masking.py | 524 +++ .../datasets/multi_stream_data_sampler.py | 1084 +++--- src/weathergen/datasets/normalizer.py | 130 - src/weathergen/datasets/obs_dataset.py | 236 -- src/weathergen/datasets/stream_data.py | 309 ++ src/weathergen/datasets/tokenizer.py | 142 + src/weathergen/datasets/tokenizer_forecast.py | 149 + src/weathergen/datasets/tokenizer_masking.py | 253 ++ src/weathergen/datasets/tokenizer_utils.py | 308 ++ .../datasets/tokenizer_utils_test.py | 64 + src/weathergen/datasets/utils.py | 1040 ++++-- src/weathergen/datasets/utils_test.py | 153 + src/weathergen/model/attention.py | 1111 +++--- src/weathergen/model/blocks.py | 259 ++ src/weathergen/model/ema.py | 71 + src/weathergen/model/embeddings.py | 217 ++ src/weathergen/model/engines.py | 734 ++++ src/weathergen/model/ens_prediction_head.py | 55 - src/weathergen/model/layers.py | 95 + src/weathergen/model/mlp.py | 64 - src/weathergen/model/model.py | 1488 ++++---- src/weathergen/model/norms.py | 117 +- .../model/parametrised_prob_dist.py | 126 + src/weathergen/model/positional_encoding.py | 117 +- src/weathergen/model/stream_embed_linear.py | 27 - .../model/stream_embed_transformer.py | 147 - src/weathergen/model/utils.py | 46 +- src/weathergen/run_train.py | 203 ++ src/weathergen/train/loss.py | 252 +- src/weathergen/train/loss_calculator.py | 320 ++ src/weathergen/train/lr_scheduler.py | 533 +-- src/weathergen/train/trainer.py | 1801 +++++----- src/weathergen/train/trainer_base.py | 371 +- src/weathergen/train/utils.py | 27 +- src/weathergen/utils/better_abc.py | 42 + src/weathergen/utils/cli.py | 144 + src/weathergen/utils/compare_run_configs.py | 266 +- src/weathergen/utils/config.py | 69 - src/weathergen/utils/distributed.py | 114 + src/weathergen/utils/logger.py | 157 +- src/weathergen/utils/metrics.py | 64 + src/weathergen/utils/metrics_test.py | 20 + src/weathergen/utils/plot_training.py | 985 ++++-- src/weathergen/utils/run_id.py | 13 - src/weathergen/utils/train_logger.py | 578 +++- src/weathergen/utils/utils.py | 26 + src/weathergen/utils/validation_io.py | 198 +- stac/abi-goes16.jsonnet | 90 + stac/cerra.jsonnet | 676 ++++ stac/common.jsonnet | 62 + stac/era5_v8.jsonnet | 768 ++++ stac/functions.libsonnet | 129 + stac/fy3a.jsonnet | 62 + stac/fy3b.jsonnet | 61 + stac/fy3c.jsonnet | 61 + stac/ifs_fesom_atmos.jsonnet | 1211 +++++++ stac/ifs_fesom_ocean_elem.jsonnet | 735 ++++ stac/ifs_fesom_ocean_node.jsonnet | 1141 ++++++ stac/imerg.jsonnet | 49 + stac/jsons/abigoes.json | 486 +++ stac/jsons/catalogue.json | 120 + stac/jsons/cerra.json | 794 +++++ stac/jsons/era5v8.json | 899 +++++ stac/jsons/fy3.json | 229 ++ stac/jsons/fy3a.json | 237 ++ stac/jsons/fy3b.json | 237 ++ stac/jsons/fy3c.json | 237 ++ stac/jsons/ifs-fesom_atmos.json | 1398 ++++++++ stac/jsons/ifs-fesom_ocean_elem.json | 855 +++++ stac/jsons/ifs-fesom_ocean_node.json | 1319 +++++++ stac/jsons/imerg.json | 104 + 
stac/jsons/metopa.json | 237 ++ stac/jsons/metopb.json | 237 ++ stac/jsons/npp-atms.json | 238 ++ stac/jsons/opera.json | 114 + stac/jsons/seviri.json | 279 ++ stac/jsons/synop.json | 255 ++ stac/merged.jsonnet | 72 + stac/metopa.jsonnet | 61 + stac/metopb.jsonnet | 61 + stac/nppatms.jsonnet | 60 + stac/opera.jsonnet | 44 + stac/seviri.jsonnet | 47 + stac/synop.jsonnet | 47 + tests/test_cli.py | 121 + tests/test_config.py | 325 ++ uv.lock | 3081 +++++++++++++++++ 168 files changed, 42396 insertions(+), 5347 deletions(-) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/initiative.yml create mode 100644 .github/ISSUE_TEMPLATE/task.yml create mode 100644 .github/pull_request_template.md create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/issue_assign.yml create mode 100644 .github/workflows/pr_assign_labels.yml create mode 100644 .python-version create mode 100644 CODE-of-CONDUCT.md create mode 100644 CONTRIBUTING.md create mode 100644 INSTALL.md create mode 100644 assets/weathergenerator_logo.png create mode 100644 assets/weathergenerator_partner.png create mode 100644 config/compare_config_list.yml create mode 100644 config/default_config.yml create mode 100644 config/evaluate/config_zarr2cf.yaml create mode 100644 config/evaluate/eval_config.yml create mode 100644 config/ifs_fesom_config.yml create mode 100644 config/mixed.yml create mode 100644 config/profiling/annotations.json create mode 100644 config/streams/cerra_seviri/cerra.yml create mode 100644 config/streams/cerra_seviri/seviri.yml create mode 100644 config/streams/era5_1deg/era5.yml create mode 100644 config/streams/era5_nppatms_synop/era5.yml create mode 100644 config/streams/era5_nppatms_synop/npp_atms.yml create mode 100644 config/streams/era5_nppatms_synop/synop.yml create mode 100644 config/streams/fesom/fesom.yml create mode 100644 config/streams/fesom/fesom_elem.yml create mode 100644 config/streams/fesom/ifs.yml create mode 100644 config/streams/icon/icon.yml create mode 100644 config/streams/igra/igra.yml create mode 100644 integration_tests/small1.yaml create mode 100644 integration_tests/small1_test.py create mode 100644 integration_tests/streams/era5_small.yml create mode 100644 packages/common/pyproject.toml create mode 100644 packages/common/src/weathergen/common/__init__.py create mode 100644 packages/common/src/weathergen/common/config.py create mode 100644 packages/common/src/weathergen/common/io.py create mode 100644 packages/common/src/weathergen/common/platform_env.py create mode 100644 packages/evaluate/pyproject.toml create mode 100644 packages/evaluate/src/weathergen/evaluate/__init__.py create mode 100644 packages/evaluate/src/weathergen/evaluate/clim_utils.py create mode 100644 packages/evaluate/src/weathergen/evaluate/derived_channels.py create mode 100644 packages/evaluate/src/weathergen/evaluate/export/__init__.py create mode 100644 packages/evaluate/src/weathergen/evaluate/export/cf_utils.py create mode 100644 packages/evaluate/src/weathergen/evaluate/export/export_core.py create mode 100755 packages/evaluate/src/weathergen/evaluate/export/export_inference.py create mode 100644 packages/evaluate/src/weathergen/evaluate/export/io_utils.py create mode 100644 packages/evaluate/src/weathergen/evaluate/export/parser_factory.py create mode 100644 packages/evaluate/src/weathergen/evaluate/export/parsers/netcdf_parser.py create mode 100644 
packages/evaluate/src/weathergen/evaluate/export/parsers/quaver_parser.py create mode 100644 packages/evaluate/src/weathergen/evaluate/export/reshape.py create mode 100644 packages/evaluate/src/weathergen/evaluate/io_reader.py create mode 100644 packages/evaluate/src/weathergen/evaluate/plot_utils.py create mode 100644 packages/evaluate/src/weathergen/evaluate/plotter.py create mode 100755 packages/evaluate/src/weathergen/evaluate/run_evaluation.py create mode 100755 packages/evaluate/src/weathergen/evaluate/score.py create mode 100644 packages/evaluate/src/weathergen/evaluate/score_utils.py create mode 100644 packages/evaluate/src/weathergen/evaluate/utils.py create mode 100644 packages/metrics/pyproject.toml create mode 100644 packages/metrics/src/weathergen/metrics/__init__.py create mode 100644 packages/metrics/src/weathergen/metrics/mlflow_utils.py create mode 100644 packages/readers_extra/pyproject.toml create mode 100644 packages/readers_extra/src/weathergen/readers_extra/__init__.py create mode 100644 packages/readers_extra/src/weathergen/readers_extra/data_reader_eobs.py create mode 100644 packages/readers_extra/src/weathergen/readers_extra/data_reader_icon.py create mode 100644 packages/readers_extra/src/weathergen/readers_extra/registry.py create mode 100755 scripts/actions.sh create mode 100755 scripts/check_gh_issue.py create mode 100644 scripts/check_tomls.py delete mode 100644 src/weathergen/__init__.py delete mode 100644 src/weathergen/datasets/anemoi_dataset.py delete mode 100644 src/weathergen/datasets/batchifyer.py create mode 100644 src/weathergen/datasets/data_reader_anemoi.py create mode 100644 src/weathergen/datasets/data_reader_base.py create mode 100644 src/weathergen/datasets/data_reader_fesom.py create mode 100644 src/weathergen/datasets/data_reader_obs.py create mode 100644 src/weathergen/datasets/masking.py delete mode 100644 src/weathergen/datasets/normalizer.py delete mode 100644 src/weathergen/datasets/obs_dataset.py create mode 100644 src/weathergen/datasets/stream_data.py create mode 100644 src/weathergen/datasets/tokenizer.py create mode 100644 src/weathergen/datasets/tokenizer_forecast.py create mode 100644 src/weathergen/datasets/tokenizer_masking.py create mode 100644 src/weathergen/datasets/tokenizer_utils.py create mode 100644 src/weathergen/datasets/tokenizer_utils_test.py create mode 100644 src/weathergen/datasets/utils_test.py create mode 100644 src/weathergen/model/blocks.py create mode 100644 src/weathergen/model/ema.py create mode 100644 src/weathergen/model/embeddings.py create mode 100644 src/weathergen/model/engines.py delete mode 100644 src/weathergen/model/ens_prediction_head.py create mode 100644 src/weathergen/model/layers.py delete mode 100644 src/weathergen/model/mlp.py create mode 100644 src/weathergen/model/parametrised_prob_dist.py delete mode 100644 src/weathergen/model/stream_embed_linear.py delete mode 100644 src/weathergen/model/stream_embed_transformer.py create mode 100644 src/weathergen/run_train.py create mode 100644 src/weathergen/train/loss_calculator.py create mode 100644 src/weathergen/utils/better_abc.py create mode 100644 src/weathergen/utils/cli.py mode change 100644 => 100755 src/weathergen/utils/compare_run_configs.py delete mode 100644 src/weathergen/utils/config.py create mode 100644 src/weathergen/utils/distributed.py create mode 100644 src/weathergen/utils/metrics.py create mode 100644 src/weathergen/utils/metrics_test.py delete mode 100644 src/weathergen/utils/run_id.py create mode 100644 
src/weathergen/utils/utils.py create mode 100644 stac/abi-goes16.jsonnet create mode 100644 stac/cerra.jsonnet create mode 100644 stac/common.jsonnet create mode 100644 stac/era5_v8.jsonnet create mode 100644 stac/functions.libsonnet create mode 100644 stac/fy3a.jsonnet create mode 100644 stac/fy3b.jsonnet create mode 100644 stac/fy3c.jsonnet create mode 100644 stac/ifs_fesom_atmos.jsonnet create mode 100644 stac/ifs_fesom_ocean_elem.jsonnet create mode 100644 stac/ifs_fesom_ocean_node.jsonnet create mode 100644 stac/imerg.jsonnet create mode 100644 stac/jsons/abigoes.json create mode 100644 stac/jsons/catalogue.json create mode 100644 stac/jsons/cerra.json create mode 100644 stac/jsons/era5v8.json create mode 100644 stac/jsons/fy3.json create mode 100644 stac/jsons/fy3a.json create mode 100644 stac/jsons/fy3b.json create mode 100644 stac/jsons/fy3c.json create mode 100644 stac/jsons/ifs-fesom_atmos.json create mode 100644 stac/jsons/ifs-fesom_ocean_elem.json create mode 100644 stac/jsons/ifs-fesom_ocean_node.json create mode 100644 stac/jsons/imerg.json create mode 100644 stac/jsons/metopa.json create mode 100644 stac/jsons/metopb.json create mode 100644 stac/jsons/npp-atms.json create mode 100644 stac/jsons/opera.json create mode 100644 stac/jsons/seviri.json create mode 100644 stac/jsons/synop.json create mode 100644 stac/merged.jsonnet create mode 100644 stac/metopa.jsonnet create mode 100644 stac/metopb.jsonnet create mode 100644 stac/nppatms.jsonnet create mode 100644 stac/opera.jsonnet create mode 100644 stac/seviri.jsonnet create mode 100644 stac/synop.jsonnet create mode 100644 tests/test_cli.py create mode 100644 tests/test_config.py create mode 100644 uv.lock diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml new file mode 100644 index 000000000..6ba186f3d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -0,0 +1,39 @@ +name: Bug Report +description: Report a bug related to WeatherGenerator. +title: Bug report +labels: + - "bug" +assignees: [] +body: + - type: textarea + id: what-happened + attributes: + label: What happened? + description: | + Expected Results: What was supposed to happen? + + Actual Results: What happened when you got the bug? + + Additional Information: Add anything else you feel we need to know e.g. The exact error message printed + validations: + required: true + - type: textarea + id: reproduce + attributes: + label: What are the steps to reproduce the bug? + description: | + Minimal steps to reproduce the behavior: + - code branch with ALL configuration files + - HPC + - run_id + - command run + - if applicable: node configuration + validations: + required: false + - type: input + id: data + attributes: + label: Hedgedoc link to logs and more information. This ticket is public, do not attach files directly. + description: Please put all relevant information (logs, plots, etc.) in the Hedgedoc and link it here. + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 000000000..0086358db --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: true diff --git a/.github/ISSUE_TEMPLATE/initiative.yml b/.github/ISSUE_TEMPLATE/initiative.yml new file mode 100644 index 000000000..83bb8db58 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/initiative.yml @@ -0,0 +1,57 @@ +name: Initiative +description: A piece of work that will likely take more than a week to complete. 
+title: "Initiative" +labels: ["initiative"] + + +body: + - type: textarea + id: description + attributes: + label: Describe the task. Describe the task. It can be a feature, a set of experiments, documentation, etc. + description: Be specific and provide context. + placeholder: "Describe the task here..." + + - type: markdown + attributes: + value: | + Tips for writing good initiatives: + https://gitlab.jsc.fz-juelich.de/esde/WeatherGenerator-private/-/wikis/Design-docs/Writing-a-design-doc + + - type: input + id: hedgedoc_url + attributes: + label: Hedgedoc URL, if you are keeping notes, plots, logs in hedgedoc. + description: Hedgedoc URL, if you are keeping notes, plots, logs in hedgedoc. + placeholder: "https://gitlab.jsc.fz-juelich.de/hedgedoc/..." + validations: + required: false + + + - type: input + id: design_url + attributes: + label: URL to the design document + description: Paste a link to logs, screenshots, or related resources + placeholder: "link to doc on sharepoint" + validations: + required: false + + + - type: checkboxes + id: area + attributes: + label: Area + description: The general area this task relates to. + options: + - label: datasets, data readers, data preparation and transfer + - label: model + - label: science + - label: infrastructure and engineering + - label: evaluation, export and visualization + - label: documentation + validations: + required: true + + + diff --git a/.github/ISSUE_TEMPLATE/task.yml b/.github/ISSUE_TEMPLATE/task.yml new file mode 100644 index 000000000..2008dd81a --- /dev/null +++ b/.github/ISSUE_TEMPLATE/task.yml @@ -0,0 +1,38 @@ +name: Task / Issue +description: A task or issue that should take less than a week to complete. +title: "Task" + +body: + - type: textarea + id: description + attributes: + label: Describe the task. It can be a feature, documentation, etc. + description: Be specific and provide context. + placeholder: "Describe the task here..." + + - type: input + id: hedgedoc_url + attributes: + label: Hedgedoc URL, if you are keeping notes, plots, logs in hedgedoc. + description: Paste a link to logs, screenshots, or related resources + placeholder: "https://gitlab.jsc.fz-juelich.de/hedgedoc/..." + validations: + required: false + + - type: checkboxes + id: area + attributes: + label: Area + description: The general area this task relates to. + options: + - label: datasets, data readers, data preparation and transfer + - label: model + - label: science + - label: infrastructure and engineering + - label: evaluation, export and visualization + - label: documentation + validations: + required: true + + + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 000000000..7d61b5ca4 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,34 @@ +## Description + + + + +## Issue Number + + + +Is this PR a draft? Mark it as draft. + +## Checklist before asking for review + +- [ ] I have performed a self-review of my code +- [ ] My changes comply with basic sanity checks: + - I have fixed formatting issues with `./scripts/actions.sh lint` + - I have run unit tests with `./scripts/actions.sh unit-test` + - I have documented my code and I have updated the docstrings. 
+ - I have added unit tests, if relevant +- [ ] I have tried my changes with data and code: + - I have run the integration tests with `./scripts/actions.sh integration-test` + - (bigger changes) I have run a full training and I have written in the comment the run_id(s): `launch-slurm.py --time 60` + - (bigger changes and experiments) I have shared a hedgedoc in the github issue with all the configurations and runs for these experiments +- [ ] I have informed and aligned with people impacted by my change: + - for config changes: the MatterMost channels and/or a design doc + - for changes of dependencies: the MatterMost software development channel diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000..d462d751e --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,69 @@ +name: CI + +on: + push: + branches: [ "main", "develop"] + pull_request: + branches: [ "main", "develop"] + paths-ignore: + - "CHANGELOG.md" + - "README.md" + +jobs: + linting: + name: linter + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + # Install a specific version of uv. + version: "0.7.13" + + - name: Run ruff (black) + # Do not attempt to install the default dependencies, this is much faster. + # Run temporarily on a sub directory before the main restyling. + run: ./scripts/actions.sh lint-check + + - name: TOML checks + run: ./scripts/actions.sh toml-check + + - name: Type checker (pyrefly, experimental) + # Do not attempt to install the default dependencies, this is much faster. + # Run temporarily on a sub directory before the main restyling. + run: ./scripts/actions.sh type-check + pr: + name: PR checks + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + # Install a specific version of uv. + version: "0.7.13" + + - name: Check PR is linked to an issue + # Send the PR number to the script, which will check if it is linked to an issue. + run: scripts/check_gh_issue.py $GITHUB_REF_NAME + test: + name: Unit tests + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v4 + + - name: Install uv + uses: astral-sh/setup-uv@v5 + with: + # Install a specific version of uv. + version: "0.7.13" + + - name: Run all unit tests + # Run the full unit test suite via the actions script. + run: ./scripts/actions.sh unit-test diff --git a/.github/workflows/issue_assign.yml b/.github/workflows/issue_assign.yml new file mode 100644 index 000000000..406604b78 --- /dev/null +++ b/.github/workflows/issue_assign.yml @@ -0,0 +1,50 @@ +name: Add comment +on: + issue_comment: + types: + - created +jobs: + add-comment: + # If a comment has the text "Assign me" or "assign me": + if: | + startsWith(github.event.comment.body, 'Assign me') || + startsWith(github.event.comment.body, 'assign me') + runs-on: ubuntu-latest + permissions: + issues: write + steps: + - name: Comment + # Print the username and issue number + run: echo "Adding comment to issue ${{ github.event.issue.number }} by ${{ github.event.comment.user.login }}" + # env: + # GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # GH_REPO: ${{ github.repository }} + # NUMBER: ${{ github.event.issue.number }} + # BODY: > + # This issue is available for anyone to work on. + # **Make sure to reference this issue in your pull request.** + # :sparkles: Thank you for your contribution!
diff --git a/.github/workflows/pr_assign_labels.yml b/.github/workflows/pr_assign_labels.yml
new file mode 100644
index 000000000..fa3de6928
--- /dev/null
+++ b/.github/workflows/pr_assign_labels.yml
@@ -0,0 +1,74 @@
+# This workflow automatically applies labels from issues to pull requests that reference them.
+name: Sync issue labels to PR
+
+on:
+  pull_request:
+    types: [opened, edited, reopened, synchronize, ready_for_review]
+
+permissions:
+  pull-requests: write
+  contents: write
+  issues: write
+
+jobs:
+  sync:
+    runs-on: ubuntu-latest
+    continue-on-error: true
+    steps:
+      - name: Apply issue labels to PR
+        uses: actions/github-script@v7
+        with:
+          script: |
+            const {owner, repo} = context.repo;
+            const prNumber = context.payload.pull_request.number;
+
+            // 1) Find issues linked to this PR (those it will close)
+            const query = `
+              query($owner:String!, $repo:String!, $number:Int!) {
+                repository(owner:$owner, name:$repo) {
+                  pullRequest(number:$number) {
+                    closingIssuesReferences(first: 50) {
+                      nodes {
+                        number
+                        labels(first: 100) { nodes { name } }
+                      }
+                    }
+                  }
+                }
+              }
+            `;
+            let data;
+            try {
+              data = await github.graphql(query, { owner, repo, number: prNumber });
+            } catch (e) {
+              // Print a warning and stop here if the query fails (e.g., no linked issues).
+              core.warning(`GraphQL query failed: ${e.message}`);
+              return;
+            }
+            const issues = data.repository.pullRequest.closingIssuesReferences.nodes;
+
+            // 2) Collect unique label names from those issues
+            const labelSet = new Set();
+            for (const is of issues) {
+              for (const l of is.labels.nodes) labelSet.add(l.name);
+            }
+
+            // Optional: ignore labels you don't want copied
+            const IGNORE = new Set(["enhancement", "science", "bug", "documentation", "question", "good first issue", "help wanted"]);
+            const labels = Array.from(labelSet).filter(x => !IGNORE.has(x));
+
+            // 3) Apply to the PR (PRs are "issues" in the REST API)
+            if (labels.length) {
+              try {
+                await github.rest.issues.addLabels({
+                  owner, repo,
+                  issue_number: prNumber,
+                  labels
+                });
+                // Log success only after the labels were actually applied.
+                core.info(`Applied labels to PR ${prNumber}: ${labels.join(", ")}`);
+              } catch (e) {
+                core.warning(`Failed to apply labels to PR ${prNumber}: ${e.message}`);
+              }
+            } else {
+              core.info("No labels to apply from linked issues.");
+            }
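The GraphQL query embedded in the script can be tried out from a terminal before touching the workflow; a hedged sketch using the gh CLI, with owner, repo, and PR number as placeholder arguments:

```sh
# Query the issues a PR will close, as in the workflow's step 1.
gh api graphql \
  -f query='
    query($owner: String!, $repo: String!, $number: Int!) {
      repository(owner: $owner, name: $repo) {
        pullRequest(number: $number) {
          closingIssuesReferences(first: 50) {
            nodes { number labels(first: 100) { nodes { name } } }
          }
        }
      }
    }' \
  -F owner="ecmwf" -F repo="WeatherGenerator" -F number=123
```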
diff --git a/.gitignore b/.gitignore
index e34462881..0854b731d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,6 +12,8 @@ __pycache__/
 *.py[cod]
 *$py.class
 
+*.DS_Store
+
 # C extensions
 *.so
 
@@ -77,6 +79,14 @@ instance/
 
 # Scrapy stuff:
 .scrapy
+
+# Jupyter Notebook
+*.ipynb_checkpoints
+# Use the jupytext extension instead.
+*.ipynb
+
+*.zip
+
 # Sphinx documentation
 docs/_build/
 
@@ -202,7 +212,15 @@ output/
 logs/
 models/
 results/
-
-# uv
-uv.lock
-
+plots/
+models
+results
+playground/
+.config/
+plots
+logs
+outputs
+output
+reports
diff --git a/.python-version b/.python-version
new file mode 100644
index 000000000..e4fba2183
--- /dev/null
+++ b/.python-version
@@ -0,0 +1 @@
+3.12
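`.python-version` pins the interpreter that uv resolves for the project; a hedged sketch of how such a pin is typically produced, assuming a uv release that provides `uv python pin` (as the 0.7.13 pinned in CI does):

```sh
# Write the interpreter pin consumed by `uv sync` / `uv run`.
uv python pin 3.12   # creates/overwrites .python-version with "3.12"
```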
diff --git a/CODE-of-CONDUCT.md b/CODE-of-CONDUCT.md
new file mode 100644
index 000000000..ef3bdeea3
--- /dev/null
+++ b/CODE-of-CONDUCT.md
@@ -0,0 +1,62 @@
+# Code of Conduct
+
+## Introduction
+Welcome to the WeatherGenerator development community.
+
+We are committed to providing a welcoming and inspiring environment for all contributors. Our goal is to develop high-quality software and ensure it is robust, reliable, and available as open source for the benefit of the wider community, and open to external contributors.
+
+This Code of Conduct outlines our expectations for participants to foster a positive, productive, and inclusive community.
+
+## Reference
+This Code of Conduct is adapted from, and meant to be a summarised form of, the [Contributor Covenant](https://www.contributor-covenant.org/), version 2.1, available at https://www.contributor-covenant.org/version/2/1/code_of_conduct.
+
+If the interpretation of this Code of Conduct is in any way in conflict with the Contributor Covenant, the Contributor Covenant takes precedence.
+
+## Our Standards
+All contributors are expected to adhere to the following standards:
+
+1. **Respectful Communication**:
+   - Demonstrate empathy and kindness towards others.
+   - Use inclusive and respectful language.
+   - Critique ideas, not people.
+   - Avoid offensive or derogatory comments.
+
+2. **Collaboration**:
+   - Be open to constructive feedback.
+   - Share knowledge and help others.
+   - Respect differing viewpoints and experiences.
+
+3. **Commitment to Quality**:
+   - Write clear, concise, and well-documented code.
+   - Follow established coding standards and guidelines.
+   - Test and review code thoroughly before submission.
+   - Provide new tests for new features.
+   - Submit helpful and constructive feedback on code reviews.
+
+4. **Inclusivity**:
+   - Be welcoming to new contributors.
+   - Ensure that all community members can participate fully.
+   - Encourage diversity in all its forms.
+
+5. **Professionalism**:
+   - Act professionally in all interactions.
+   - Refrain from disruptive behavior or harassment.
+   - Uphold the integrity and reputation of the ECMWF community.
+
+## Our Responsibilities
+Repository managers are responsible for clarifying and enforcing standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+## Scope
+This Code of Conduct applies to all project spaces, including GitHub repositories, issue trackers, forums, chat channels, and any other means of communication. It also applies when an individual is representing the project or its community in public spaces.
+
+## Reporting Issues
+If you experience or witness unacceptable behavior, or have any other concerns, please report it by contacting `development (at) weathergenerator.eu`.
+
+All complaints will be reviewed and investigated promptly and fairly.
+
+## Enforcement
+Enforcement shall take the form of warnings, or temporary or permanent bans, as defined in the [Contributor Covenant](https://www.contributor-covenant.org/).
+
+## Contacts
+
+For any questions or further information, please contact `development (at) weathergenerator.eu`.
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 000000000..1800cd0d4
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,75 @@
+# WeatherGenerator Contributing Guide
+
+Thank you for your interest in contributing to the WeatherGenerator! We welcome contributions to help build and develop the WeatherGenerator. This guide will help you get started with contributing to our project.
+
+## Table of Contents
+
+1. [Code of Conduct](#code-of-conduct)
+2. [Getting Started](#getting-started)
+3. [How to Contribute](#how-to-contribute)
+   - [Reporting Issues](#reporting-issues)
+   - [Submitting Contributions](#submitting-contributions)
+4. [Development Guidelines](#development-guidelines)
+   - [Coding Standards](#coding-standards)
+   - [Commit Messages](#commit-messages)
+   - [Testing](#testing)
+
+## Code of Conduct
+
+We are committed to fostering a welcoming and inclusive community. By participating in this project, you agree to abide by our [Code of Conduct](CODE-of-CONDUCT.md).
+
+## Getting Started
+
+1. **Fork the repository**: Create a fork of the repository by clicking the "Fork" button at the top right of the repository page.
+2. **Clone your fork**: Clone your fork to your local machine, see https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository. In the terminal, one can use the following command:
+   ```sh
+   git clone https://github.com/your-username/WeatherGenerator.git
+   ```
+3. **Set up the upstream remote**: Set up the upstream remote to keep your fork up-to-date with the main repository, see https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/configuring-a-remote-repository-for-a-fork. In the terminal:
+   ```sh
+   git remote add upstream https://github.com/ecmwf/WeatherGenerator.git
+   ```
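With the `upstream` remote in place, keeping a fork current is a fetch-and-merge away; a minimal sketch, assuming work happens against the `develop` branch described below:

```sh
# Refresh the fork's develop branch from the main repository.
git fetch upstream
git checkout develop
git merge upstream/develop
```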
+## How to Contribute
+
+### Reporting Issues
+
+If you find a bug or have a feature request, please create an issue on the repository's [issue tracker](https://github.com/ecmwf/WeatherGenerator/issues).
+When reporting an issue, please use the appropriate issue template and provide as much detail as possible, including steps to reproduce the issue and any relevant logs or screenshots. Please take care not to share personal information in the issue tracker (e.g. usernames, passwords, hostnames, etc.). Please use the appropriate tags to categorize your issue.
+
+### Submitting Contributions
+
+Please first open an issue on the repository's [issue tracker](https://github.com/ecmwf/WeatherGenerator/issues) that describes the contribution you are planning. This can be a bug fix or a new feature. Having a discussion through the issue early on will ensure that your work aligns with the development roadmap for the WeatherGenerator project and that your PR will eventually be accepted.
+
+#### Implementing and Submitting your Contribution
+
+The WeatherGenerator project follows the standard process of pull requests on GitHub. If you are unfamiliar, consider following the [GitHub documentation on pull requests](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests).
+Pull requests are expected to have a clear description of the issue. Any significant pull request is expected to have an associated GitHub issue.
+
+1. **Create a branch**: Create a new branch for your work.
+2. **Make your changes**: Make your changes in your feature branch.
+3. **Commit your changes**: Please use clear and descriptive commit messages.
+4. **Push to your fork**: Push your changes to your fork on GitHub.
+5. **Open a pull request**: Open a pull request against the `develop` branch of the WeatherGenerator repository. Provide a clear description of your changes and link any relevant issues.
+
+## Development Guidelines
+
+### Coding Standards
+
+Please follow our coding standards to ensure consistency across the codebase. Refer to our [Coding Standards](CODING_STANDARDS.md) document for details on the conventions and best practices we adhere to.
+
+### Commit Messages
+
+Write clear and concise commit messages that describe the changes made. Where possible and relevant, reference any related issues in the commit message.
+
+### Testing
+
+Ensure that your changes are thoroughly tested by:
+1. Running existing tests to ensure that they pass.
+2. Adding or updating unit tests as necessary to cover your changes.
+3. Making sure that all tests, new and old, pass before submitting a pull request.
+
+Thank you for contributing to the WeatherGenerator! Your contributions are greatly appreciated.
diff --git a/INSTALL.md b/INSTALL.md
new file mode 100644
index 000000000..bb6000e67
--- /dev/null
+++ b/INSTALL.md
@@ -0,0 +1,15 @@
+```
+# Make sure you have a recent version of gcc in your environment.
+# Make sure your Python includes CPython (if you do not use uv's python).
+
+# Install uv; suggested solution for HPC systems:
+%>curl -LsSf https://astral.sh/uv/install.sh | sh
+
+# Git clone / fork the WeatherGenerator repo, then:
+%>cd WeatherGenerator
+%>uv sync
+
+# Start a training run.
+%>uv run train
+```
diff --git a/README.md b/README.md
index e69de29bb..77d885407 100644
--- a/README.md
+++ b/README.md
@@ -0,0 +1,31 @@
+![WeatherGenerator logo](assets/weathergenerator_logo.png)
+
+# The WeatherGenerator
+
+**Machine Learning Earth System Model**
+
+The WeatherGenerator project is developing a machine learning-based Earth system model.
+It will be trained on a wide range of datasets, including reanalyses, forecast data and observations, to provide a robust and versatile model of Earth system dynamics.
+Through this, it can be used for a wide range of applications.
+
+More details coming soon. Please open an issue if you are interested in using the model.
+
+## Partners
+
+# How to use the WeatherGenerator project
+
+The model is currently being developed by the WeatherGenerator Consortium. If you want to
+engage, you are encouraged to contact us first by opening an issue on GitHub.
+
+# Development guidelines
+
+The [main branch](https://github.com/ecmwf/WeatherGenerator/tree/main) is the most stable version. If you are running experiments, you should use this branch.
+
+The [develop branch](https://github.com/ecmwf/WeatherGenerator/tree/develop) has the latest
+features. However, it is currently evolving at a fast pace. It should not be expected to have stable code or weight interfaces, or to be backward compatible.
\ No newline at end of file
diff --git a/assets/weathergenerator_logo.png b/assets/weathergenerator_logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..41798ed8aeeef153abdaa4c87650da7b3155d034
GIT binary patch
literal 83188
[binary image data for assets/weathergenerator_logo.png omitted]
zfDX)Aby*5;FG;$?6y$hRFTIal=CZ&oG+zW{0OGbwCzwoII_dNAj2YQxy=xQx*6#(9 z(~eo9iykfhbj3&bFn^>NU@awvNtZZpf)&<^J$GDwe;NM>$S7~4cyQeZqW6)XN3p&g ziP8MviWiVa|Kdw`b8z8&fSpEX7pJY}lihu?3fqnMLt4)*5 zd!8uwXDA&|g^hYa-KE$@|8*(m*FT4YoBulE^Hgkqga39(k80HZurOZlWW6yT>k7`8 zo?_CpK4|40tZVsn@Z-Y{SiBL(%G$hpsE#}MdsPh#aCK7D_#gKCwF9E3Y65^K26^a! zXBRdx55ENKO*tbvwj`Mjn%&>K0wk-9mahJtU7(%?vVOnov zg60(1BsByjCmaa-q{tE`%05rn1&}eA68{w{HloQi(dkQR@DQ%#(BUT6_J+H z*$O`({HRBpr0jqyYDM_~P&yn1XEQMPynqOH9^f*@LUGV{zuI@H6umkQjBiaY2%{+r zZ5g6gW`QM%W=Q;wem9grGkWUij2Z~J4{|{h zoV3)1jup>DX8>g)bEEmdNCt^yT;kqQa?gd-u=Q^IM?hc7aVgJReli`UY-+sQBhid; zJ-yGYmVe16#s|p7iTmW%IbUo!kQx%>BhN-HnZZX*EYj4&Tv!LskIVm8t@*I)@gp2i zvdyo!p^?Sd9Sn*Y>^YJTaBG8_%8YU!-U{`GuTg=JW?v%8Czs*~+(F3MI!8bkkw3sy z4@A7HG+tW~t(nQ*9ruzLK*yuBYOnnKWCHVkMwlC&2WRdfvdIVPUk}Uc8qpN65%M5x zrG|4u>EmA@V2lvKjh$_ZB?THfI^fFbT!=~kvsn^}-z`nIu+FcB+o(n95n;IdMnVr) zn$n8mULY&xjUtH#H?FrqKDjr#_-99^)jxwgN{DgatT$4=HhE>L(kv!hEp;b#@FO1F zVS2V8+V~odEt;(*@8=h1;R~+sH(FyE}j4y;7 zoEK&TVbCI=YGj zuZl1uVW!_eC_NkW00~dTJLXgab6O89@MiR^$mjJLK3S2^!U9(g5_73O@|q^gEhAqf zD7?lDE?))Q_sT<&9BBNsAT%}G@ocmjSSHr~WEd4)a)j1fmI8`=g2LDasxkk6L*ji8 zzJJxPPvJKl&gV**!LWVJ3=->7cJaOEAVUWlV(|5MMyAJ<4f7}53PO;E$G&9;0lz}o zq8%``dc!+y4c{=rxCef6!-lxm#R~y(to|$N+kL9h-%1Tz(tGg$ z!j500?2Agr6IbbuP0%wy0wjFkqp`<+y}MysGe5hAk_F|VA{#wtBRi~u)EO4`U7NsT z&OB>}s<5uy6%rGpM5aLX0j~wh8;od6i}>fzUj?dmwMQ@{L{}|plaDwalwq6C6hS}{ z^Y+rQ(>(djN_tsLqJP9F&WSLYVMDYy2Bom{F!f`R)-GS*6#VTSBk zhVMOePWt=>U%&N;*XzEo`@XO1c|EV^b-%_vWzb=v!H-#d93TnVh#Y}>bj-dA^T{Tf?6}4{dFJt+&Pm+7-cv_$fvyN28?I{N(21q5Lm9;z-*3-6R5>IRH(&gR!4dj0_~Oc@ zq+Y6*Qi(Ct_!N`zB+Gh)M&;(im#CvWvl+ZgX00wKMRZH)p;vU;63t6tic3Jd&x$*8 zkQ{fOUBT;WcHBFuXN9*=eCW{BviTDImwk(33pUaOdWV6jIK@lH#K>~Uo=_+|gT{?D z5;9qZt{Wg;NdK_*Mc->1=XZ6Dd(AclnY5L#q z&H$Qe0t9hRvH5Ct%@(Cr3Z^7eDq<}?BqH;+SL&e2G`kbASbwlQE~UJxNf_FoJysD< zZ*JR41z(IEc-|^^688^-TAS`w6n5~h7B{~)ofT1fU{{b61mUe_>Wq`AAN>%t*Wc2I zaYAwc^x^LqbqV@vIO#6WDfE}rtauu|5(7E`cq*ehm%f}Bv2gJRO&y0#4l>O>OB`nu z4u;%@RiO-3J3@~B)uVPb2lGLBtFY?{f?0%3MDU-dE)sbC`xKLbc0K+Q}7~M6EWZ3lsp3LDs6GwD9co!a7q7 z?nOLe)Q6Q{6sGAb6ucg}sY&eq;BRy_yT4sT7MgL-+^!Epck&%LSeX=9rteDb*H4Ts zyynZ9lV05H8s^ zdNL*tg)UF#r%;nQYHtyj1nJK(q;YZ3QV~p^Mt9EpU&*Oj?7r(L%Epxo?S?QpJuw-R z@(a&nMp-Vj`AHA#lW~`C_Z77qyJeG4{=1OpJ&(N7MT+1oy#({+-IGEk_jb*IN7*$2 z4b(4!;1lZd^1#aE&$It>pBJv@+F6BH)htwFya@F8OxA-|ZFV}cvPIQXpGsnxCVAPY z=`o0P@knT;oonPGTH2?tBBC=MCzdF6`ohD+GiwncE3ZOT6_zL@m83wQ0Sx>K+w!CtjGkLGt??+5atM8#d3d8WCMGg)ly31+*%2uW}% zhMl7%>0D$3es0rpt;NNUdXr&lUD_H4L;VEO51QUR z!RUtZWi$}Y^3)g-Q4bp)I_>?NZy_^L!HY){HvG0qK}ptQSZD!4uxRjy#=mIis6D)R zY9YqA?O_!JXepVeP`P@c&`kZpTSeM~(noN@Bx<+QlrdMUxutlzrmNjk$HB`z!+uit zqV-b4AoPyx+4#kNgF^eEw{?qDJvi?zOMXTqshnQ7d+nsq$oVUmIlQZMJei}tU*IWW zyd9|1j{ikJ#I(;Y$h7o?FGor_LWSrf$fo8?KO{g=jlb;SFO)bg;=TTeG%kt;x!!bp zysP{dY0ES*rlRXjmQdwB&_Dxp=P`?)CD9-tv90!I55<4>WVF4M{_n?~pq8uIN=W~B zAf-h<$#1qW4|o=XXLaLb)w3IXT}RW1;^|7Gw54&kUo55-dE<+$Bgq{dG*Vt0KmEhj z;w=9WaUT1Y)oaX)7cSw4<_|%W#TA4Q=R6wF|5BeZnaO3Pjd5Jd>iui41Hhi6wz3g4 ziE+ud01#u+l_2$sH|Dm-3!Gi};awis%UzGP)i1(ob4TI)Eg+O?n+4Ek<&%~W%}i)+ z*PNd$738P?Xnv`_SIIA8wl@Wh6`)u8x@BY7nRC^QrM;hG7|dfr8fzW*#@D(SOi&#p7@UkUs_FB|jf!wpIfjl>yy9s{^j>oN z=$QKfreEvKca`bSs8yRfe?-onh4ng=PxK#zEV}V#%o=YX-2uAg_I-~~OIGFTUD@Zx zuZwMyD)ni8w1JktliFSWQUJGsx>EII);M+EUN&)pl^Wxv+@9nKZ3}7pYlalX0FYZ4lDE1Sz#bmC7H4jh#`}{{1 z=x8DyV~Es;mI&Azs@r%uI7732xm?GT-(lm&$9`P}uIWXq^5i<>2fZy}m&q84?W#n;ovO>XL=;iq$?hEDNozmzf+Z%x=k!fecVdoqC(bzf zq|xbnwV`m)o+RgQQw}Mo0vgwv z@ZyT14h$?G9=Tww-+?zVKj*MuBltuN6TT~@@;%38;e9b$6XB1R&h4DTEG4XkzO4v!Oh*}%{_WLpPaKU~!k!0do5HTR zlh)eEX;8fa;|Cl6EkI^jjFiq}G)bo*N~#)h)?RLVy%wK@G)1cUk_aXcn$H&>o3+ON 
zREyNda0nFb@TR=x*&(Y!^5X&?d^w9I$0-bgK3wk8;Wz}Rf}Kox4ilz?!UCUrk6l9{ zMk21Giu^~=<85pE)GoE>R(dG`wENv{R{8#J`&{C3>US%Et-SKC$V~7V_%dEx7=yDI zMb&k052uS|W@PIzrh@)mL)V2(U!-Qr#&-0zri@H2*{M(xp3?E5aXD!TUeJ3)#)v%Z zx;c~5IxS)%H>m8_WDE`3OzZZLw=rw0zIyX{U!&&nx4jMFaLetFF2dgWm;ib?H;9rR9pQGr~d5qpy@9Rjz&_8MRLFyczhhNb?XSqH|+ z1VugD1Kpaf*is=jw{di=Av}4iG%oV?yTAuTi=%L>WoT6nUC@%g{ai-xbXrFvr&qv) z4|LXdT^tS zy{^Dz!u8KLM|9@A`lriRvUVoaMG72#i;H5Eh z(&VOxPqzZQrg1o{V=7A-*8A;U_f@QScRyr&}^SRkBqDXS@ zTm!ztiscQ6>|23|qk;w3hXN+{fS15E+sWN2TJaZSar6jDfDaDuN>6}HKio-4Ucg}h zrU+6GF{|+<2|1%}6LzV(uFt~a7hjrHVIX#nH;sl&9Z})V36H}IRPqV`q`^cKmg1B} z6H=qfh36)gbZu2H1vQqu4#RwT5`qd`BZQ-##kjuiqHD{Z`QJmqJsPOfF}3r@LWCXFoYLZ zTY|%7o+L?y71|t3l_}d_+LK~iY!2&fcht@&zkak?f>b8MNBTAheVP4QTre?8ajkDB zx3=LWK+nc#sw056;HgXzvohCb;3_C54{haO*sk5WCdTD7+CLDBFuKed}EK5`G_#;=yNl6`rWp z$_t|sP8Pz4HWwlz)hn0#NfU@xQ}wXN2AM6ywX<$=ZS9_q zTporTLhPzed_YuzYM@kHoUEPT!V|eeEKv`41%^K0&%XPV*W)Y4>nh36)#dM2e$4w zlzi%FlKC@H=^+$S zXyARj)alvp!|$sT}nMDja2=b+`*b->-qvs+#Y& zEfc38IfWJ46^O%n;d)0rg9rL%G2qaq#Te7|!Vl6bmwp>bcvRF8%Y`vPP$DC@pk%G zFMfs0W96JIX6EPu-&#%#*U?CIVD%e`2vJX}p8?g87U#>iWrJvkp|h16-#}33{cRj1 z$=7`yuIt&D9aphgW=rGar{A6w-j?-FUQge2RgPVx35AvRy%r-@n#6VG?!$Y9 z(E8&yB@rF=snM|^DCDNKXF$vHKw2!KH}%7FpUS7Q%A&F!pnMK_Z$aB@M-JEkN&J%}2;_-<>XM2{8MlT@dD3h}JGzqV(+aShscp-3$( ze~jl3`Bva);t@1Y5mgRi+pahWx|fKIToZtZik&(OcY7Tum|>QvO)5sp^k8gQrZlDktX^IrB7FJ|oYVKq&teagI)fAwc?liA``&9OjhVdP<-YdE zZa3P8=NeE1IS&_a@yM+TNv(T5wVl?s0AhnTs>|EavPzz8uiIKz&j%`42$B_wbXq1& z-J>k;7Nu5A$Q*PMQ$_Owh+FO)M5hHK8zh*R%Jwux9Th2M1e&7yJE z8-QnUQo@@M!2tqPUjKWXsgcKwDwIpil#f!*ar9aHHfYSCcN(pKKq0rLFTaip*P1VJ ze(|+KzRPj%+iu6JBxlMw1qA_SzYfo~odhv6fOqgg zr)3XAjcXq~L-kR=>sI0zE5|hct;UZHWZoUh@u;hCiukJ3%)tRrX+3@9xora5{BU4X zZ8V~@zX4QFei?5+x$nNF@N(FSqL9Nx#C33=ikef87)q+l{UPcp)-D~ZMtmB6-GX&H zP9h{EHNa)lJkg8T&`7m@_tD`scN5mpZ0uioa{^jN)l`u=MQ^5TP(_5mCk2r-zAL}E zSe`HA;+w|A9v#}<;dO&ak3e^QsCOKGdqZ^9BWINaDADO>q8C3II)6P);#~&+xHkuv z@`#8|5*^h)-2G1{U$>Xu?c+$KW_(|AEnXLlOk=M>$kkmwVi%W|arYX_<#h2r_j+@N z$V?0&H_4g9zLMm@0X6pM1SS+W&BBE289L0cT?X^&2{+m>NSr4Kp^$J|Qk0a^@K@WX zcTPfEt51Cs)AT#pXZuMuV>Q;{c9&|hn31pp$X5RJKzhpjKF6tLlNAj>O%&@5qPgtT z($jg&4$SdK6hOeH@-)&y_Xp?f8Ks6C`mb}_r-y^6K^`; z=CIUP@P!0yKG!bYb5uCZ9w}(_QS+3IkyUw;^tFCt9Fk`FF@k}eRMN?W^#qiP6K!PZ z1gbGjJ_j-bzGE^pPwu$@P1nya9gn-|{HOiWLP=m>PoZN{M{;e&gxQ1cw)Sua2~qhE zlIyow4E=KBr1T~{_lBa)@>kQo8vmNx9H1LfHQBvnZw;jI?}aV^7=ak3V9 zs#5cL9w>UdsS{mD4NLd?soe#|!*r6u5uE&1xx018)i?cTZJpxu^0BA!b_vRaia|uB z{jq_u-}X8(f53G}++jMrR(H0>C^XH^cpw2+0PXDI`WTULrO?RYQ^6&1Y?6^`J7d<9 zP-KSZMjZxIYSuJWp;fR{w}4h(tLWjU^diRQ=3cuy&H4zW8UJ{sd*^PigI|M8Pbk5% zgSVmAw8E4a%PTL9kh9-aIsvd~sH6IxH-mz3n9f}W_5MHFtDWs7u;)cm+Y5Mdr8VtL zk`I0nn9jX3XevOQHj$@3_6M^Mb}98+DLt@~ROj$?HTlywoz&Pu9k(RYJW6X4T-s=) z2V#izI%Y#VfK|)G%MHqFSyohSCypk{3R&ND%oY#-W_&_@^~#&Ervy-NST_BcGO2Ks zoX;11QM^{)Pc{2CD^;{b5+_*p!meO28dPu!!0l1UU2y&^*Ff@5C?9Nk2PK_Kd9G@^ zq|Q$mI-Z!h207@Iu&Uv**{>cQN&^@5Y`aEq1P%k?YkEtI`P{G|Kdl-xC+wtaD!mCg zXgK{|_9J#q*=%Qv|Nl52kHN7-O14PB?BBf zM6^35ET+Z(mQIJ$!XO2xOH_6PLc71uSbCZvq z$i05wwWl@izW{_jfUVFhg*{2XMf6joOLAn;CKiKRYJ_#iZ$Bqk=~E8Ik-k4KD_TMD zT-E0LbNpCOGk?l6;Bw=CTf66{tLQy+u>5N&tM0%ZJ&N0D7c~|N#cuOI+IfCO29TZL(uNGP0`k*|dwQJeteMN!r*(hy+6?Xz02E^^ZT-!1p_e(<7epASb!Dueb&Hl2jivzCvQ3m z(d!j>o)K$NGc^k;0XtmcRDw1GV9Itw^e&~1PD1; zuz%JYb2Q=^@a8L+sk6#209Kd(A9;2_o;_nFoR;=C@t?Yx`*ktf*#lDyoWbDdXz0Y? 
z0_zW=#5U{Wd*YcDBmV=y0=nK{7W>4YH?HZ$JcM6di-)!)rBFNlNI}~+RnbTgdh595 z0+1?$fRx??l#~D6O$QcMHwR6v-wN_uL4E3Be`2xdbvWMe?+A%tHBBZr<;eL@cN2q; zoM9sV;TCo~>L7Gp_{JZyuoVQJ_a-`qT8|5N1U?y$SN_J-l!^Zbn?SV^`e7El3*8T=_;z~;n z1Z`~Ep~Q1#u^&bkpeQV9dVeXNCmi=!jH&oM^u72c?nr zBH;b%PQ-Q>g?uqby<{Umngj_DQeE@%4EkT-D`ZlzIZ7BZkZolKH9lILeKVLCa%})mw_r7-Kxp~`KZ`r&wzUDn^ z?yu(+t5&4bedoOTZGjOfOT^455Hs(K9-y7-)vey_8iBg|6E)^Z8PEa3^18w?!u#or zUw?d2#F;v|QH<+=d-2e`5nQ68z*NGqJ)W1~sKZBBP=^n6P%)Tdl60{%J8RdjSrl~G zBa*HCH{GrdggZ99grbT$yrO%+OxR=Jl*8?LxI067P;Zul2BB3SJt+qYMM_TnyGoir zr&sctsqc>=Q5MNj%Is6Ealz3RqO;A?HLug)DJ9YeQt3=IaT|hGm14$4?t~(qzhqe1 z;?n!%Qz12gS6sKYo`4lN-`KwD{IeJ7}rxD#r_Zng`C{!9AYiea; z>Qf=*;c6GYDU1}EPsQc3PK4O5VonrkV|zB=J5`O`_~#J(GRg41f|uts*(lx5bU#F@$`vK$w?!AnR*iLzcT8o| zLs)L%phu6!!K!MNl^v31QB)b$;vrh~*5~Nnz3@GxpZk>ha{=B-Z4+V{2mbw!e@%1g d|L0%W+#gY@tzjJGvVH3*^rrTWBGvnk{vR@NMG^o2 literal 0 HcmV?d00001 diff --git a/assets/weathergenerator_partner.png b/assets/weathergenerator_partner.png new file mode 100644 index 0000000000000000000000000000000000000000..e057de37c9f4f2c3f73a0361b52aff64529578f1 GIT binary patch literal 308490 zcmYg%1yCJJxAouzCm}$P;1=B7f*GKgkIQa)Pyfel776rS-#y3-b$s z;w-+9mPLg`h0GVb5G6w~D<(j}2b;z|O5QzAdU~r#-ti$Oj`O42c;M-DZ?mtf3;*ss z$<2t|iwVJx0{Nfos{ON2Hz9ri7(f6<00|4A0D)k1063o`w`gu^2(aDf{#ReRWEorP z+iwnT&i~v7`bD;`n*E;K@;5}xJgVrg?bu0Fgrr%4o2;fe0-x3>A@ENkpZ|T$ECZ@O z#L&{Pfg^Ve8RK<(>_L82m0fBMMOeWR$M-M%C?C+^0r`ByEWso-?0_lKPdQ?oC!D@$ z%UNncQu8nWc}!t@-1bsiW5u@@H%;VSSyiFb;{1N~6jkMEc|Ar(yRh)P^iYiPjc=x5 zKVjR*6~U~m-X~~uJKxu^neSyd7f1ITIW7F~Gm9PWX`oTS=)n0$qXqW8WB7LMg%dvO z)Fi-!QUn~rZ1E!ClMKz{#^H+nJ5jP_bO2e1w3V{Dm7(Fg=iYWon=@@8@mK6f06)(Z zPo(I1Lh8=qOw&S9u8y#S-<+WU16`1W3^zPl-Sr#+{8EPV)^yrIA+7Q36{|RLl%oU5 z4}a8974I{Eq63KuK*~Y}kP|bexsucCD)l3= zijut7CQg{?PH9PDnPk zjtvbJ#>Q1TssSi>D(?~GVc>%t@e~c!`1Gu-b)xp%+@3wL{zp(xcc-(s*pN&&m#1_2 zCCUB*85&spC|~Oge(C{yOoJhPA$ohHvIb&dyEouOYR~b)nBBE<#OUMZ0eat$pREP{ zYcO3~Zf?t2vc}RI9q!cJi5O)`nxaHMx(6J8tyF7CrZ$#bQ0i;_t@@cuUfG7iY(A9a z(NP&&cJi{-0TXF)ezYm`L=Gt=i{_(NqClBA&c%gy8)3V<#<_vB+Qf7X3C&gFS!Al& z3f<RY>7hh$)b&g`=H>5^*U;A1{8mvZnrStCQ{!81FB(R{ky@+ zit}<(VjGA){qL;%xfC9;Tb(3Is8?xmpyL%CG#6b|+xrLEyWJhF<(Je(g^lbcp-rxK z8ukT@Gw)DX~h50Q|Eq1 za3$(jY>z1cCW?R${uxXV%mj=aK|!j9hnYGY1B^(n_?Vr5FaR|dm{9=$bojgU^!8O; zdfHLU2uWySdA^Ky+jtw|f4Asl?(SDL3=e-`x6@*gW=cZLChiJ4c@GT>T^8>Gk*RD;m9J# zXoa$O<%&$AefJ$P{^eq>|Aqf0G<125Ei`z3M#K7UnW=t_Vr&5bIos9g= z&8mm;Q=Y@&d2bF?ywNI-#C$fC)7C%fLrIUp^Ih%(V#YY>>1<~BMpo?8(2lirtHYI) zRa6MQfNT`Y5BI=ci3+E>y7<(u$9F<*0&2RtnU(#+H?VvIqiPEe*c+8^)#w%FCyV$2 zsk8Rha+97v+oEEfFFYEZorAO9S-UvW@=T#=(Bx8Mn^p5C(n{k>T zJ|q6J!iB8Wo?-L#} znsC$K(YAN8-f+0vUN4#9p3NUc2le{Ln*LPMcJ;|ed!7T zwlGwFNKgbb4g7^gLy%EHaQQIk?f&#s(t^?OS!ixlJunc2~)FF{Qfbvw(6S`uEF zGj$h#_o>eki7!EcedR~^e*tieNgEc=TCV9$aU4pmnrd~nb0hgd&;g#NRBI1Mz$aRv z5JcYbldGd#>IW6Cr`dbXo2pXEZ;@>)+|ahUKYgG?ynbctPZ!L&MpG>{sG?p0WyHOqi@@kl*z&FGQgk>GbH`o$xI3k1y~SqLNl0q&{4=@;qW>D5Tr5AY{@4aKjkb&4*u58`m%F7xaglOEJ`DpJ zcZd0dt?rrgkf3+Hv2jr0U%NwLqrut8llxz3J?}8~#Gr6LezR0}gMxl%w)(8ESOu-` zac%WF?mkR%x`pO)YT@)7E`Y=yD z3=oB34hr~B5-Dapp%w1vvb3+1Qs>9!H7*L(#cgA;SY^-FZ5V<%`x*usJxmbOhWndZ zf5r-DrPdrKB4BfQKc4-h-8oe=6JfPg`XPGvl^>0Fsm^@-iSfb_K5L~Ziv&HCK59fX z%iCrnFku(88eP#iGplBSL(!lpx1R}Om1C2&YpeE=cg1Is5C7)C8}ZeCBeQ%ey>Lz$^wMyE&dXX-Zw3S}VC3RB$s3Mv@9-WpOA zeh9|*yIs)h9V!f#<%Q>Bu(tCL+Wt~xF|q|C`>huI2I@aA1+ zfBNO?_En5fHpa)_O(rK&)L9q)ij^hLo8UAU?G;ic>m7JD=v>OU2Ts>|FJ_IYd3_*C z(1&VMUE14u<@+>?lOo$Vt$V(u-c5v_alq!X8x=67)C=#hLcplAhIDy%Y0FganL4PM zH3`A<9!{+&(ZKiw4k1->c)KkmG?mBGR6Mb9oO|eF{d8H(>auRNJ1VKl1aYxPT7sq6 ze@8RRg1Xbq?&mlTO3Ad4n3)s?~3Y4S*Z 
z!m@_N{Is3L?VlPr%|sod=PTt-M&^F^R5$X}BIeoxM;l4YwHCLYGt@l>BNt#dj z4H{PeO=SDdza(6?%tiK5XQ)w#EIN6CPk06*HPGwsy4y%8(bz1quHtNaE6*JUdUCC9 z$4LpD40dO}a@pZ&`+{glZNc5uNhjjqeASD9{c3V*hph2LXZ@4S?)*4OEUN2r9OH%e z;(8ZYAPVpY;o@l|Gh^dB$EB7Fa@w@mq3_+QNR!pWT+iFVm(Sbwb%_Wo#ai6jWJB|J zp6*c4C11t)0L)vQBE}TEa0!|M;Av8GmP2t{qxWNVAU!9|6!F`G=5l}lZB=SLvA~C zWn<%zQ9IuDo@QE}+iG^qJ_ztSc|A76AzmP5S25h*GAz@a+UYB-L=+1PilnTXaUAW} zm7`dY!%VC@J#1%F6D!(1qdGXp_)YbfqzP^vKlnS6UvXS7CVHAS#{Z&Ka@o7-_M>tS zHC*4Ptkq5a{fq8!la&xb*7~BO_jwve-_4fkK1=!OZU(0|?y9o(*`>EYkfg~%Y{3zi z-1wgD2<~zP|pOaA{$x2)5&k({`uT6Nt1x;=Kb} zw%f_`lPOcEr*UpiE$fA;$tCu`Yik=zLKQNj-+~M=A${ogg<>%VJStcycv;zbKjg%( zOWd#ISu0;;8o1kacN#n2!?kg*20A-n@wLmCueiqkPW)I2qwUG9sM>hpb_@DwwRsw@ zF+ddqRnz*PxCw6#{tu%qsb~mSc-p=bEw6Nh)-zOJNcvf3>`rYeJ++=Ky*E1@V zX@|V;8#$`-Smw9L4ncIpw*OnJ(DOj6^ zwzG2M;yf~Nx^UU*L3!qR%XDoXsMCkbt5u7vH1XG@JY>7!HfxI=nf)+6yi-H60U)vOGCSR#l;M*qaU?q3};-=DmAgjjdQwLJ6mu$vjz@AX$F_ zf$(2+YC{D5?I-^xD>qIyPhOMZap|sGN4xW>=Fs$#-ef2CzH%%^-<)FHDdjSz&wlt? zn9aAku6}}igfi0*5IWt`4}eB0wCZwN2!H?ZD)52 zpb67-I^LZ)nF36-vb;b^-Aiv$j^^MrV`Z?x{J_jyxd5c`T@hA~`UwI)sI`Ccp!q~o zJKxag@Bug!h9R!;STR%-XyTtot2Wydv8VqjAI;`JqHq`z>kM|gjdI|jkz#9+vTR{< zlz2KtHe4r%dMLDuuv?6T%=o=BA`BMp%*ieWtFRbrV%51#CkJ->o0=?6=ZK3zkR4n0 zewo7REM^c8dH@FKze@B^RfeM@A(>vSnCGQiT4<{^E)K(E9C$syWh=Mi;FMy8|M@Zf z>&<;F;(Sfpe9mZRFMI$)p1SSf?M&SQ5GMD~?3IdK~=+i*c!$=JB*y$gz01n0%} zvMlTFtZ94oNc?_-wpdldI!g@u{qyLPa}bCzuK4c?Mxa0k`1bOJ@`dy1{(>;dJSXT= z@vXsu2=^V})6b@-!^DhGjWFD2CJ8MP=>0!NmR;Z5JM8W}%E{4a*|v=HXGVVHD<~a+ z0YsB{&8~S;gEqTP78~8!AOVZYy-~AcyTnO_%R_-tx(YkXtHpJCcJ5#@=&my^s}uOd-0NQTFub5)mt0t zXUHoJcRE!pU4&d%aA{xiJTP4_$n5Gr&%T25&pr51AqXK6ehm%E>{q1O)d-N#@Jz4> zh;~?LXi73FQef)OF}}mYCSN}%GA)L2eG*ZEU{Fo_AJxzgKO{9yQ;y4GPcCCbVSlQL z!x{InJgKUryKT)Kl&jrnX^oeThCiKgD4DwU7hz<1et7)czIrfH7mwMP7+uBv;%V~y z3%LOcFhH20tUhWH#?zFL81UIxuHAaO)WUo4I@-;6Y+|*>S+B zPfvVWLc;6m;4CGPcVY7{I!M_gv(|8SJmY?Wqy6*+%`@xhD2`$3OscGiCI<;w)|`U%$GzqQ711I5_ijy(PS@vOO=C-2RV zOygIaX{JxK)f+rSZ<6@WGoxAE2_vp{yA~opnJaIMP$;W!m)E}8JVfzMW;~fLn!_bQ z`Q+I)ISmS0XT2IJ$;x&Wg!f`Ir_6Td?d>rHJ?YObW=ts$l$Jw%1Mx=k`hlnZ0B?F7 zknFDw3lWG41)>LmLISn~EJ@@jRZ0HbMC7La=~GIV*)};5G&K!d33mSJbDuP5{KGIy z1qV&`BBTZnRa!%GuD_LL-{$Z)ql9Czn{OioA~1w{EIqvoCcZlJ0{`|2u70hcdoB$p zbLXASp;v&Pv@Be^lrALGBEve^OOH%uwdgYrK9113mYS}qIlZQzKgAm& zOY1z?*?81RG=a#&n%$n<9{enohvDOq`va4kr(&=qsNRF~S7;mL0!sr&y%9za4PnI$ zyGALBmlcK-`pRWc9`$oT_{}qj##VvI|D9*@AoM5C=1%|-98nU3-h#dloiXB;T(&Hh z9GxMdQ#5{r0cYsw_>qPSV|e-hAUOngVKl>(vqhk28AnV~W@Jx)Imolod-B*GB+8v# zT%5|y7`G>+<78wu>dmsj6v!>t-Ka6i9D?dbR*i^dyFEKMUEjz``dy%1E-LNm|5K<|~cc1Przyu8k zfe7Ma3`IM-u?Irgf`amJi}icGt&wF-*3Xgr;1ZwEhWQ0C!3qH|;ev3fYw$t7gS~Ib z?=@Ra8l|BF#N5|MvH#RIpqdSI%}sXOE=LzbegyqVXuu*He5wPjJ~XMPbx&G~_lr}*}qMk6#1cR^CN=Ee`AqiHHZ1t^iR0AbM#2CI#(_q$tR zS5v%}yopGe>NoOT93Le}c4%}6N^nVEcJvV)Ai`G|AG7WbEQjBPtHnB z)4u1Pjy?n+_T-99Bdkj$burD|)@-uAzNSvrwUj4@@5VN7vi)=;7Ha(YoOFLrxTt6# zzrd|5gX0-S!3(nk!oQQJ+WMuT48%(+j!8fQ#4A5+gfP)KVOS13Q9w}8{DV^uxUoLy z6(K({(EtWWG;l~C%#vM19e@fSDF)V0l)%DRWPlVluw`kYF#!PuzS|FW7!aip!a@xJ z{2b(BVUlvdvNI{MnBDr3C0JurmS>2}|F=b}JNPL#YA0MWJ!$6-hjxBc&IGh}$+tKU zXR-c<-_6d??wl4b)xJw3#xnjvu4;z^P_M3-J!xdoXi3P%58pTED>`f%YtqWvI^Kw= zyB>`n3KDif0$X_(TEnhE|B7a*Q_~%(d}brDwE8MY;Q>}@Y1VD7dJh}CC(fr+HiXCusQ+2{sp!?%+o!=bD0Pkf9_ z#tbzGh?u&+z=TBw6JSNr_;&$&pdGdwVgO>BA{ApZoVH6xrBFze9Fb3~pp_n49Fkak zO9DXUgG8a*Cx>4dS?I&3orfG``hfNAkasycy=0(mfg^Ot5gOQ3T#3xgNDjqxB^MpOY)70F^ zr%x6HGu}301qig(^VEZJ+rPRSo^z2Unk_VxeXNF6Tcii0 zh@@e)cR=#LlIcWPTAoFu^Y{NQFwOJT|HemnnI4+^jW1+JD6EhI0aoJ48C+(6l$8q~ z3`KIr&0lJ4DljmRNirCkH6Ms^V(+><9E_*^TF74mfiQ_MpX*7BUDxQek! 
zWaaaN3&~MPpNa1@+$Rg}?LAe>JMh17fcE*}qLP_BF#%e?_#<|*=~^%yyO^$~wHrFT zq{m5AA={&o5QdqhS-8Fadt7sGQD%{zTD<$?m-+q3vWbk8nKJ3|3=_@SmZWu_q$1gW zw(y15-!fJp1QX-1{)q#3@KH*talQYILwC2t-8md#aG*~_=^aft^8+MOvvhW9zt%N5 z$7f4WjDXbE<3)TOrRa4kNnlpZ;@_OBm_j@pCi6j-4okFZ{ReH&p*vR)Vm>X)a3?F#T>q9W_W|9R=`V zKt@Bwfwv8R`}S*xC^Mp&8At*Jz$6Hi1lvP%Q30rs9t3bC01)iP8LM@R2b&6WyEDMN z`5F~ljP}23N(jbS=vEsbWcoaZ}@}DJ?KjSc*XopJUggw z-3S+`raMDRu0j!iXNomT+q{&nxdNgIRpL-i`)4b26}ou@In!u(1o@-&be_DMy-)? zA78uIXD-G~FAiwek~}IJpPaQP&zICS)xNbexUrZX>t5JUNB*Np0(|&kem^OORPs%T z!+z)b6p{nJKSK%(FxjMLYZDNN`1tIh!{TN*zWk>1xa!-td_h{cvD>MZt*q8PJE z4co={59gP<&FPAwBj2IflJZv?^(b6#=Jbl0mLDk`I~DD@g%h-T-Ld$Yw#stxh+NA1 zIiQ{o2&Sm3V@J5oa6!T{V>pvy>NibStPUOm+M$)6p4x-i4i{ROV31HdFZEkta=tRN zqa!{tEa%8~EExgG5)7zOJ*w+pdP>REX)Ah% z(W>B)`{O(I0!9Yg_qQqI=_ycrua8VDGhIanyrz?o5i*1`ZQ!y$%J)yjRxAf34ibWO zixpt4m)`Bo?tYJipyZ_s%3Yp)7@0Ns@c|NwGR+r{bvwQQ)%ci(Op1DcYwN+m7<^Cl zW-~fqq%=F7)PoZ%?_TMCa&Sje^vsNVb$6dP`dtmS+{~=IRdq&BzGiiOJilm9$4>)G z9UR&4WO`AI6y#XC0esI{u8||HE$0Vg;n-29H&6G!`_pB-7y7i3%T8((!i3E&7%Z8A zKbU{*)lA%Gvv6~31NZSyFMkx9jRe_F_Haw(YS+C(y22$qCfIk?kS{@Mc9{(FE5cFLv>C zD3uM&m^db8duX$jb>xBO%ycEKBwE;CEiuqyt;8|85tp0ul_Hf z6r}aC3y-0`BK>gsYX*(w<(`tPz;4MrkIeZ{koUE3ZdOY3pM zgGkR$ms0_9AuyeO%O4m3>|OW`0>La*(Z@n5 zI-|qy6ckW+<8S@sKjnlw@vFm=DjH;EAX=Seek8)$0^q=pNTJVjc`UuLSQWOK{o?~K zxy)*L^~QrfNt$V{j)?b_fpyAsKgM`JzWBEHPPZAiL`GMi(#|HX`GAA~7hURBtOqTF zTOv-FC)d;Os||k2%(k@`J11a(wxdZ>#atJLt>H}vQ_XEL>W($-nwsm5)^@%xi(`Fi z{va{aiVf@}jx3q6w^1bWH6Grs_Cg-MmI=&QN(mz1dw$-)9;Y@lyY}|CV$r{b^%;t} zEc88kU=6xV=e%PT=gVXl4%)S7U3o5HY9Xk#19EI>T2MA9`G;H;j$ypB*{kXQ9Y39y z?|&?Vcwyrgr5^S)9GKSYpqG@dKDuM=#A8wzq?3CI9Ev(kXQ2Kw$yoSS&z zTNz-Oyn~v=dUvP!?7}el+;DIIn~AUxC-iHrUNnMihnpif>dLSx00*Ezmxv-=-I~3Q z2^d%`Ck%*u56z}ta&`o5BHOO6N~IBVHz2X*p_kiqM@>%ZE{NzstC`jC3KjUxn!6SO zMqO58{Ew<#oWFo9QqhuV;5tfp>sI~2dM2={5H{+(T_|UT^Y`ecJ=d`*o@HH1{Ku5U zOC4gN)k;*=$qG;PxsvJYo%m{7#gDdZ6}6gk6LBTGqeoB6L;>ul#j)=dWi}jZ=|MB2 z<5Tz{oacpw=7^arVH5;lKuiT=KL#3vPkP~UE2p}aBX^UG{w~nHqI6WmQWX_U&l4X@ zY3Y3LcBl_M$W|iSwqvCh>PjJfcyTf{xK`5MV;)gReO2;}0|Hl7sTG44COmT zP$g1C=cDlQeASFE+|59kmTJ1}etMhQ6-`hC?Ut!DY{+HIba-Ql$scOLJv#xto=M?B_j-X6``@^aHNqSUio87#kbbb@Nr1U`<&ZhKlJB_B=69uDg!pVt-Tez z^v?U?3Dwvs(|DMr;qT8fvxY0SrbK!s4?XVE7`9wMy{&CRZ>$jM#B?_Yt9^H6JcSwr z5Me6k{U(8%pYh;0$bgN36%na?F-Fg=kbVWCQXI4P9GtZg;{0Eb($%rE^g4`H|EzIU zXdQ3uR$o@G?~_j+tBc7|q}BDgJ6w7k){S=tMEJT(b9+c{=RcAHREK|0VAPE z1E4xZpJ7_M^MM~E#K`#zkwrNonpl{6gZgXQ(!v^Vm-0Y}xOls^%I*4NCSkXq|BH7d z+i`KNCXZQq!PQ;F?Gk6>#Q9R2^^6Wb+9XM8(Iw}yLQ8*=pP`V(d6xnte>)oremATx zA0a+M0xUj|OHSnji)IuPHI(2N)of!mH+Uv>$Hgdd%P>EmzaWQZSXEshNHl1p9lz9| z$q3Ve<==h7pjOzAmVn8gCk2tJfTtYol!4XU!tTa_Y%;?UUgW52IN)vs z9{d~+B04A!OJFHa=2R%#WrVKxD3pUbh%`~&H;aJe2LM`FtP0(SlEc~bpHy&|)k@YJ z-i%w~9%1Xl6o$3c_*wV-h(otX@SiSAPI^GZg_oTyR!Ax@i%oeDDNQ(lV;v$36MchHL1KzfWy`OEE}2B?fw<<<9<;zid3J%C%y ztEjubB*~JOnX0|c`J0&f7N-wc@pebL4oD9dbN``HTE32g8JIGx)OhYqotI-fl6v>sP zs4+{QO(2=p1#w^^;gO+n_7yH;#DRpFgoTobgZeI*pbF&ZI$t9eHD&!n0nkE&RhSAI(;go(K1*r>9ekg??4;TU(G!}$E0QR$628|L3p*rQjdRa-; z%>5&r$2$Dg?90#y3VuW^fM!J3i(`0wX_F6_WP@4MlWNf@?|n7JIaN$kqO#b$_yHTE zXc1RtKm`vCQIzmWPMBCYA{N>=fT&CWF^DL>E}T$*2H>lR>tf6n8i-6jZEHRF`SVw< ztKMw$Ww!4b7FTWa?vA<41oo7bj*3rJ8h1icVG4b=p}lu9vXYQlz3>U9iJjBb>$vQ6 z{%K{;dNd>+o;K$+!wARu!$O7h^JDsnBPuJAb-sF`d+>8x6udPD4Hqns`b*vN+cD^_ z8&(l9TygnXX0@GdtG0@=uuDF@K23J!)B>z~r{^4%F|xJ)!2^0Fz~+iX)60vi)9nxEB!S9_%cCc3IjT7H zfyGa6*zAj>=to%Z4ccba*TQ-!TcSX<@u9vzpTFkp{1>_nt929GKiq)I*R)I57bBp$ zkVz8_h?8(gI>Qk-I!JOt{*2re+KQT(L1x+Y1?y6NK?%6*+f&aQ~Jt+usb;}llcT~0uM6s~^c)EEqolz|S2O?~9q)xIGl&lXp z;zx+#c%qQQc6XCauv=RSG0+{f>;H}nItiw+Y#?HZ%Pah}sx((`dl5>-v9ukC-O%&j 
zIta--zrXKOZv^u>`E~?ciftsHEG@sG+1}1(xG1~7;6WqHt80#p+851Br&~}aXSF&z z9~UCR>hx?uz$4)SK8w?p>KIhS z?Vb{nQ}4>{%Oy^mQ+Egr7T$p(_KxXzD7;%G3h85m=OkV&H!P{hUP(wTnvs;=c%pO| zv|A{fwM<2I>sZzFVv}Ok?@v2yE*dT#RL@6RL(=*_nyR*Ws0*oh^uFZ_o2S?PkUO4V z7}t9CSHZBIc}5qL)gP1HhDuH;deXlrnbLkc;xKV)V?D5Dcl+)S3t+MeUVf5Y=)TCFyQA zflUVl$rlF_*bHzJ?eh}U#;U1yHL~e=OGVolwLVe^@L5x}^b+!oRO(Egk9^+9!0>e1 z`ByYWFQ7lXuiJJH*G_Jkt4hL3jHC#xyE{4d?lr+h7KN@cp8K(UBDCFKu7uH+7YWzP zYC;4gKCqJ~y2Z;*+W$~evS7k;)#^z~tE36p&PVk2^tAa%oK-O0HU4rA*1gKjx%~dP zc8aS`TU5|Fl@sqsbuBF1nkQ#SnGDfAC?RWk@{*axy&zHHML{>LSkM}iZm{NUZ;fZ1 zc$P$&yiEQ2zPA~J7la^Bi6%bp5uWPq`r-MMn-Hz)N;P)KJ`=wJ@^FAx(-`>IxT>Sn z;P;LgLM9k>UsWbg&GN-LaJv=5mP`bsCX+eKI#9pJrF?u z{c2BqQklo)Cb+>Hk2|Y~0y?HQN-4%r`si~ftnTItbllheAJ-GPSN>v8nw9Gxa=-ggS!Dr&F2_m3vY$-T|G$;q^N z$+3m)Hk2+E_4qAY@PR`i3qY=tlE7z4#f$Y)mEw|#0eO-y(GjXlnGm_U3LVVU)SmZ^ zP0D&Zfk8D>2|n60q^|Kn_)pAV^tPgQ&qt-xrGU2NxcI2G&bt}PK4fWc+N&|^kCc0) zp__9hczNOeq4CA%hx&;%K*#yl?b_)n(aZ+?&E3Uv^_8=K@Oj$(MCJ)H9E8DFQ>2Zs zj8@yVf!%z|_(D)OOS7W>s@_|sb8v3n%3}qIvB|1(S~5n~0V>uCWX{4bW+D#(P=5dN zK>-J0IwsacMpwB^Dg7`T5|IvS?o=OvV5%^EX5RY9HIJBe_{x z1{O;l6~143QE0WNSf`dLl-$;WZ50EP~-w{n#z9;sU_9D za&5QxlaxV$o#f{{vLrN3@jc}h_xR%Y#3(CW(;(yMiEV5z0vIeaA$nvlfhX)H8yjVjP7Df8v&6p(UdgzywCZWRV1kx zO;~8cy<)VL4Z#y{t<>AgYYLeMVAb_}Hio(pyz9VF`2g|;hCsH1~Eo-iM(=Suc zg)`r5bfj$JiO%Zy!CQ1$SqKSK8eoWWZopZ(Fcy&$z43th8YjPgcE&=Kp{x_F^LT~U zK0B&tD=FMo4!)-Rn7y0&n}aa5zc)uINnN%)#OwN0wy8Neb8Ee+Sg;;~p^@2WFt!&y zw_+R{E5Eg8ZITn|q#;dLumiN0=eLnorFJWLaz;35E zl)dHR;S>twIy{fmY%5(fiAaCqhquN9hG)#kDzxi7W*48O_MIzkdwXTeKG^QwHSk1+ z;+?mH-&$V(u3RirCIU9BWFq}+e6qbDFU^eqKTw;dFHtK+mtlLe$ z90Dp)Hz9KS`q)}zFJT}>_isilla!w*Yyl=%f;uKxz8XEPt*wS@U0E>+^QFF=KIEPf z!o+B*l4f)`rYTSA#nh}NJnU_vVZLqTQRN2V2MNDOT1v=FZOA0Dj3KimtFs7x++5ssOF$^bKN?UmJ*d6i=7 z4z02|Mo>H|FyNTtMp~X<5JpP2E`q53bf5&iMuRww5Si!NDPHK-XWEe6@^+&yT|cH0 zlPIdoi)FtOfUP^RgdO89+Ut)@z;+zCS;{ z9$CY^uxPiR1S=wVXR5jb9YvlSr!#riwKSe~G$ngr$-wjHFJuLLwLVzv%O9WDYmc7& z0h=G(WMX-VSZ#?nJWt9Rc6Ug>@iPtSza0_HmzS^e@E*-r^Uw9gf^7M1DT?FP7@tdf zwb@D~TzPRN+zX)4Bllp0Q4PIXlGQSDrv99~)S`t$`eYGBwa-)x`4}P3Z-LIbxH(1h zfKV0GKlL2j0G;0DU@?IOK&SauY3G?79{^ymv|2#F$t`m2xMR825o)I?k+RU~96d2X z^q&T$!dKw}WCVb3rW>A6K)yX>&TMIba;kgzkR6N&F8ntL-l#I>5P^gcp@N9UI{TP0 zOcXG|{!ugljYN_-*QXa2Ob7M(_NdK%q4K1rh8m(h-;fN{YV*lHrC=6JoZU3Al#nwp z%=L;c-X#n|QdB+p+f(?AA)MaDJ}L)O*s3yVS;ba!yHS(SuZm#{;-$fVX1!uR{mmnnH@QM8M>6&R;8Sgw5!mv7LMi=T-@B45CH^$x@lskqz78z z=jRp(zU(?zsqKQ7C;2Fim_`w%`;rDr=g(I3&VBQ2U=}u|bj~xeU#&V#GWC|Wg%8H5@ zAH_ubtft2lp;f2%-t)FXDlK1oTGPBTp-2%=ow8IhlsH?9aYh~QyF(4{0FqmJdzCga z@-4w2@WXOVBWgl}bGP+SePJ=vloQb^7%0FXe!@;6P{&XC07O6-I3K2r=_DoqfxgEu zOH3D&Yg#d(<2fBH%*Pdo^(U&{U`6apd|7_n39%a7gaYZTNyfAaHIrIif#9F8Zyg0E zmaOjjdwo^W3r9)iV=T(OCHOVB+I0r1=PfZhOJYW5T_6KS`prv}wkfzu)W2Q;7Q8{} zJEbr%nU!v67P#xFY7J;p63z&`@|LGdJ))};)qTA^K!*k;*p5j}n?#I_%u4@NxcM0UI5F>^*!hl1V9zld7~I?)L47`A*J=5lby4F!(Lf$(6=AK(Q@w0vmtg8 zrl<<6`}T|Ss-hjpm&}pUs{Q5$BX>UZ70nhF=uRrsOfZZdW!q6)ich+;*gHnkE{5_^ zSArJ@_rcf8klEWf7}K7Q#xD()q<#k-Gs3{5$(8#?cHX~dT4rC^*Z>DN%q5w{klI=0 ziV{_yJVyH!9h}csigUk8FIcWh6lqw_jogz=EMe{*tx2J0=JK z0;cm|sXj!w8bE=Nun*<0xOi@xWHzx7XICx|lLUoWfT?ZYH9fo^>2Q)P;Vac3uEpPg zGe$vG*9eABN`IieYr)gYaNq_aH;G0Z>NG=kDSOP-TK=`y?7!!$YFFvUsIMLClJ|Xv zi1$TFoQHp%VT@>R)2F-Vy{dZm-n!4O zG)(~>9sm?n!!Z~5qI@fH+-$G9W91%KDy&tZgOGBz<>FxhykC=&Ls{*-5Ix_PB#w?A zNxHAE2RVi!rF{9jtrte=|Mu6pCU%!T{VJgl`sjUppf0haCydufWzzL+&X~(1wN0z@ zP+vOVTbrJ<&Rv@tyRcH4|6|fqKH$NrR>Kgg5`W(t`0jxqDfI;c0E@~yzgaL(J~g=e z8~-TmAjtWKK{4Gb1spDGo?_KYX(+#*_ z6oS+$1N`mU_wVoW>3;Kpf9lZ8?iI7XP^af9E4*4S#%pcd!G(ueVqn28xaJ9)u!O6C ztC@deoBo7^z8sF%%vBZO&XebuL;B^V9%nEZ^RdGKCMc{XTH{A-@ngWrAf-177nf0` 
zL*|NXhjj^DIoKM3cXeLB+=yebiM&4EZ~w>uS_^zg5Q&Vr3^X>-I)wjn_j-Y-|__<)54obeiZN&@x?e>+*df9Bum zdD=81Qut@&{kHMEItu(5dEca1@0c%4x|@wBI+%uq(CQ?O6}BdP96^LSoy2qzUnGvA zk7N0UGcDkKcleKASbEdBNlt)xVQILr0dhKMy|gR7jaB$~h`O|}l&D?fsi2AeEr+~u zpoV(8_oCf6`}bK7>(yGtJ_eT&Mr75iAZzt$-V-rt)Q*+!!)bfK?+93OE1brHyzVPtO-YV}Lkv>H)rr>2o3QU8A=co0_?7F$goln# zlBp<+Jq;I!N9_=n@3$cdvpJo=3fzMO8aI@zV0KqQfdenD^Px&d6%{jy>hH^f&BKdX zS}`NT7G&C!pwK9r@8Jf2KO6tehU?VH_HwoR<)d+c9*c>HspFLLaDu?c^s$Q5ruK*a z$H@`86Ed!&sjk!(TodxuA0CJvYF`f+Z!aS6BhJ)`E|Blef?{W@LDx#+pQaP0T=}W- z#`fxH3z%Szn!k*sq|OQCLSn20Dj1ORZWPyux7aMos3<$aTWFsXB|9acSxT(6wiJ<= zNqRU$WUs!}-SdDTAeRESfWD>m{!R|z_SX-J^9Ub>1=yf6s1-fQ9?FxcC5*X)xKlTd zdk+__nWkURn`OliK}>jqW8R^^g_&|TJpWp)(m$Xt^i_m(x?3D1oKhTTRiGGRy7K}5 zR+;jVb@wdLIJ+EF;nUIUC2?!n3MCV)?a%n{r~#gvZt5&=y9z#niYmT9b86=oyKX}N zv*sd|Li?3Z$8Iw;2Al{dMr2xxE=e=%`b@f>LwNLb7Pi6ye(hI(wSQu# z&i8QnX1&@MFdmt4MDF8mPssCS=G)!`LC@T(@+aNvwT!w?uCQuvcsl@GEBxAZzgMS2oc|+?-}@26{#iYrQ46H!ySZPf|6-$vigX z#<^Cb;y{P%$+9q=-p9DJ zd}yjBih&J!BwK$eH9KL^B#x_dLQg0|uO}@y1J`|+fRx9-%k8N%i%m9{%49WofQe3XIH+g({*3o!MZZ8u z5y<~zo$>yV_of#)aW;FQ<*3xu<)!zKD@?#&m;ccNwg3{pTHGXHJDrdFhFOmpjt)z_Efk|XtwyDeYO`QaV61$RWKx>iBOrmuPK4BONscs@TWnM z4j9wXe^@7SrT_^E@R`NIl=RAnplC6?*(N+{P&2DJOn1S_WK&$MGjG6~ynnHqw@xhl zzjxYG#0mY6cv9(~MbyVvMot2z44nOuZ9GhI;x9TFfoXlIiNEXt-6Q$cJwt*;$lSwUG zeA(ZS1L5i`T^<>b!IyuwfifJ)SC#hEo5h!dWD= z5vtI$>*9&zYR&$|{S13t z3$bX|RpfaHtisg{Lv?tEzZ)6cK6MGsH_v2E%6rTgVZj67t`p36FG9&0aII>7!Z3){ zk&^?l_4jNJVUiCHwqze0Qe+y7Q5_M{#jW^h#S;NxP5ygMPUN>q{z1-9r#5cU+0@4Hj+qll=X8?Bs|HBh8k4l~XoJ7~EU~ z9f%ray8I23I^Ms|*D9@QsfWcXIk}C~!O?TG0{7_rhhF=>jEszc7YWJ8ckfRi-94Ew zz)ZqS1k%~lLvt)UxD=C(Q;Llwg|fG8?+F`C0wO0T2_BH3nS7A~G5|7@1JuYPde-J3 zR@2?|AGyrg{cFcYSMZUvQM^uSr@>;uT)-aRj!V zv8PZ7Snc_)Q-w683;p@%&o&2V_JqYhEI*?l5x;hsjpnTegOHIJP!JLR9sF6USQA@c zUV5x&XQYmDTMW&Q2-3Zuhi=>7;Rqqt_l1?UJQt z_FeOX!*74p`-xV4G-6R3>KZ(UwcDG@ZlaPypaNTefPgHi^ne?(FdvR9p3F=XfRw0- z-XL(b`((FwvgC}{zy4Q)mj%D)S+Db3CuccA9IMn#f9PQ(9AenO0S06uvXLXO^|mWj z8(e9LFcJ)(l$Rat|8R?0uvg;eoN#}k%f_GR59C31IF~>pSrd@ zs6bfoHqQ*1K<6)(k^b3XU49mxy&^;YA1y5#BV)00O2^A&AmAFQE&d&XQvh1fOehWY zWBRFHTx@mf_|4o>b%uYHtDD!{njPhZvL38B_RG#W<4R6U^coQbHKI6_qk15446=lX zu%uOZlj;3Im4JpO_%kqYGdKhp4;Kbu#*Rt`P(_h}NJ%&(V+9Q2+9|YYU?rr~FvOsv zth3CLxa=G#XjC&J63HpgMJt8}-^3pTVemF}oD@MENl2q3B|!xt(me|x`KjASr? 
z4U39{C#6dz^PR=!ydqAJS8{rokQM>7d?Is3b)p18BsyKc&QTcPL9 ziRDpt3fuZ+PN2F*Vj?YpVc?~v2+z6(f&ZbqIoX8cNXR0B;C-SWNg0YHG|-%iWyg} zjBn%Wd-opvV7vq~}c91^01(!cLixO*2Ue;DL2{jMRoNiHU$yoz9U(!t)bLsJ9K zfwSm#Jrj~Ues6Y4ZdH3`Okkc967XrX ze%cT2L>hjwZZuBF(?}+S?@=C(pE@=)tO*YTzoHTnv{0c#YDqvDO-T}BAP6y@`1;B$ zbg`fme<>dQ`4A_0V3D%Z|IA;6pB=D#>YnB}3fnWA3xk z&cN*3_yi~%Rff%kd$L~qw&o`D;_OjT6tOdl7QsadiTCX63m5uj6HxI){y z3+)pW7Ao@X-=*nLL5k9LUtnDll2vqmRIZn+)%Hd7+HN8N%?KBUa8|@|5?kck zxSeLAR6hhrx$EjYhK62Uw+%GpT>7n(fUT;9sDhZgU^S`}yd?(S?uHFJa~T?7ZDI;C zC~zvzUA4@o*&YTc*brJZE&LSQT9hZ;?+PyeKBp(WUyTfVOzWcNrCJ;;Nv<|eoUCq^ zOG=flzrRhC^%kU&ICH1Am1V6mQzzi+aM5yWS7^kY%(b2>m}4|vwJu|CcpW~s6Ki=@ zQ0@J5&sCbbUeQ=mn3|MjoIr6)X3_;$grBJ`2%|KBk^lf2wJ^|YAAUGZO0UDV zJ#(LdQxFvun%l|BBln$kwPva#m9)@S>pfG>X`Dtp40`}+#N}^wQdymUYi;ERZT;^b zwYOCDlV^NIXJ?IMLX7zNuU4?6!E?72o=biEx;uG^}&I^_t%eK$b=RccPC|L+U&FI zoJvGWO3H3$SocTLi*FH{IWkgspAotq4!3w;D^jUlhA>#cC&E4;N@!omi`!}pivC!i$&Xe`ET z{)Rwe6UFC3KQdGjFv439WFmoDf)S;)f5JM1Avr4P>A;GT1!n@L%grvLWad%#G2v%;bKGyr>MNoBy(!Id0?g|3XfHPzvf0_*n$v;Gw4&gL9~ z71|d=xsstFz`^aAakV&-)2a|D^{US2)3UU2<2VAB%b_^l&KHL~!D($fG&pdP^5BGd zJyUgdPIrgc`BEE_&#sFVNo0fOMoxcIR)wZIDqHQa^5;6#Q)s#QWl~feE;UTb2rTBJ zecoBnbp=QLu1<&9SZSYxV^l7SdFFV&u!+LL2TyxL*YjACx!dy;^f1if1TN1T(7g>o z3>Q3n^a}`@MB0RDLjU#{tc`QT3pY@j#bW$S1C2qJ0T|kw?4qm1QN@4p+JZoZ5rI$< zFiFdI;C~nz9!|hPqtyh9jlEC{1&7tGE{)mkeuwx(9nbml@xpkoG@G~LTYtOuaN$e;VZY8+XSmc@sbE$7 zIsL=Z&$~w?H*vSwu}i=tF-DEDKg~ESyAZ9ABf_#U+z zi;0OrN9!Q;>c65@!aL}oc4TvKz*P$gW8&X7jQ)hp*NH!Xp)6*>_d?wxI-fK(RR*Vfbwys{%p?1f2pgb1QLMR(E zgd7BP?P| zu(94`A@cQKUvOw3uX{&kWvu9FS8R~LZ|JJDst2}CFRrybTy zMGO8}K3*N&{wJgQ(SgcYR~NC%${%;DW%}@7d3$%Bkn`z59G^1(fzrxUg?5ucywT0+ z#K_j4i9#}iUBty+{6)i*F>Nxqd2cuHXpck)t1+m`=lM6!d)sqh@opQ>)O7|anLo~g zWl_e-5f>%Y{bzZUu^FTjLW{)?mh#6_dJk`-&6aJ7Le~QFe?*O38mQ+wIBk zp%U$5dbH9>;xk8zKOOzfWj(yoGbTVE*{%>h<&b7huhuk_V;sAvK9}siaWdBO@}rEeR$`m zm@630-d>3vR(q!hi$|>I$~6?%?a-v!_?hmXjQlH$87V0!C{9HuDl-cq;0f%4_Kmxq zZ-0u)Rl2I)OesYje!{7jVZr+(86*5&w*+Fn;tqVXTOD@_;Bt6&&Eab_zU;fDrj8q( zo^)NHarcp;gkr$@2@TW?$;!F>g!0lkN1>n~B63_GUBDgq^~(oDEwIkcm1O#j*>YdsIMSzuV~hZNQ|D-Y4J%r?8@0He zc)nzxM7>N*pkn{d2#ZLmcIWa5HJMR6kCTfsezLqtkVXmQ4EPMZetor0?>X4fPiR8) zed@jw3=N_3Jii|(H`epo+d#k~0jdLP^PVc)6g`My-e^SN_T+E`n+}L*P6rXEyDzJ% zXtJPoeyRpOj()#HHQwkMq5XOHb&{6B#d!u2u(C9Fkza%i3JQ`*Eg}FXTyW&Ee(kz3 z_jgwWiFqB27$Kj2-+vrQ?`qQDa(U~muNLO5G?Nd6LuTC8L83YcuYQLNqd(a~hjn}L zd8Y_eUPM@p^l-|$z9=o)Fd!n#(<>9Md)rHeL5E#f^&?im^02va68rJ^tTnkdL^zds zYj9#$9^g37WPacvF9#PS6p#xAL$MaFl(wk`F^c)Xg%W5IcHH*oo`g2>wjJ+~2?yqX*)i;)c zv8@O>t;SK}6)}XzOzpE=iunEdTN9CxI4Pg-W=gV0BxlRP$<44b-aKQWUA+h~B}~ti zUSkj{3UsS=O6;V?a5eWaqk={?1py{!4{ zd>@@yVsst7dL7|T5-73|DFv5YsttLjEoCsrLwn&=rNRK+sL$e66vzRONz( z)^>1L-NGWQ!)O0(n|$?|>*?@S_aw~%oyrp#>Gf$TiQe;X7Xkl_wq3LWx9eK4cQwVq zzF(W?;>L;d zL4wtz`2Sh}f);0onHn^PX=dfv=;BeCL<6%I_@K_)lT--u| zeNK%ccyTRlurt$OCjbx$O9%us;UK;Z34o(&%iu}6gi9}gszpbu2P1~>S;{>m$waXF zad(x_(Y^(r%MI}*H-m?)qcs5qCKs5(=*5^c;|<-z_=E>josavC4V?^U(h`eB8ekQm z`d%9hmAEGZ9}S0)i~W5pjEWbFy0Jr@9*&w!lKLlF-zv8p6$K;v!(*z~a6Hqt2(n=Z zGBh3CI4BqIv+7&?8y^fD+_Tn7tSR~|A#ZmiYz3swz?C$GNi$>PFH%|yXCbfYuoCCpSP!qlfx4Y;wlM_iv` zs}MlX!!R;-=0vA{Q#rWG2B%~SP>>n!w?dj9Obm28UZ@}zXcjGa2`Mk=AlXKcK(%T` zjs~5lHZ~HCl;04=e-W~S?=wKA(}ac^v)GEq<_GNJwz-1itQ ziiWA)f!6lpc8mI99u5ks<9GG2qwqpck=2RfqP)S3bTg95-#a4u+oc6FOMOkY#m!#^ z5c%FB*?f5$ z+nTI}&U!nRvuaE%aA`k4o&$+jNK*2`psvtBnkYzot8DDjf6$nRnOEiLpQWPPV z8YIg3!4iJ4rc}iGgWoDXyQFH!V2I?|GRB>n6*3ku>j8mR9wc&9>ZrMYQeuI%*5o2J z8oU<{f&W`tKn*V2-6emL20+oLJ0{~JfhalH(8xVX+cM)As87C|xb2e^crjv~U4OaQ zT+NL&Lc=PtiB1;BHl>SF>{8_7)CyGTIOM#&P9kXf?!Xq6M$Z#@RtMLL3gYn_?vq3( zC(UhKVWSK&CPz@Zyvpf-CZJ1Guqh9@wVc 
z5-vLs$B)`X%>;_1t8wk#1(H*&*B_^9&^`=!_@A0F#iEy_0aP*QEhW+SiX?GVF}-4! zr#)&d;kVPBoK*1 z5G*d6S!QNw`OEMZ;*SH2PiSbpnakl%;USqY?2$NF&H@extmGzAB9^(9UX=axyh^k| z3MF%pkV-+z^~O7dDIqtB>c;z>u5KkQv1I7f6o*IGpDHu=Tfbk>A)v+5HpX7hm-{u! zVHZ~_qGP8{d=~j)(qfk>87{?DG?Jld&sNuwi3WlNdO$Nc8f-#y!uawT|#pAL*p+9`VlE z0YUe@+3@sHfw`MX&}ZpP=yA_;-_j~SD`t^-Rf6`M)N*!U2>3Ilx%yjyblhjZb5=Xo zaka2a%WR#^`$cwWqxjFnO{98#A=}NslKt=4A4_~JJ-X^mifTzhWh&`<_VAo?baRAC zlNiPi)#e50S?Vh_k2pZQzIXd`%d!ivcoOZ7p_!SIoUarW(7?!@<3j}o(8SjpyDX$qTHtVT2=I<) z_l`rYYXu7xT-?Oj8XWi?LuZKv8$OETPPnHsB(?Rg>(oU|f_qi$+3xHxv$Zr5D;8gD8v&LQ@!@J} z1t0HzKCv*}&X@VGQ(=jW>CB2p8~x{Ml;bzPRfCm&J2o(eC-Rx#FcJDmlGJDmMs$m3 zO7}PI!r`>f)lF%s!s!MjXg1#rfY1NMB@72e3Z1FL0x7`mdR`KTZ#9`A0|f#F0STh0 zLBQ7cOF;!xGZXI+H<;Um?pLZsudm-}8_2)D%Eor(2%koV;tn=~hI_&;{tWoPAihN~ z34i6RaPd=29bpx2_xx8EjS>)U4--f5yxdfpaA!hn6j^kjrB`*$cPk%JiQ{NGIs0 z=C73EQh&p{EVWLh?`;RUQ zTVx&h4_{BeFGq$u_|#0zu8Ttlm=HbHNrt7(PP+Z%sn!@YvC^I(-^;xbrr^moE(gnQ zL%~0N{Wqgk>)O&eU5Ve<8o`@1^?=@3o<{%i+jQp&T*<_wm40`GX=wv`&R+en-CG;6 zVz0uK42{-rFM{zGBbv$_Yg;qhBlW1px&P_=zC7~qQy=bsbDi%2cbcPe030Ve;`Iv6gc#-zI#i}0!*k-Ydto5uih%YIlE#68Ymj* zn&wmvd1<}m`w%Lb2X!A7G!OtfzJh~v(g6YIbbv}&B_1BDyOYL#=RHNSaO;bCZma2_ zEKxz}gX9%-u8lT7uK#)OO)408-vSagJ$BZTL&tMp<#oMt7EdXgpm(O34VXFV$KgDR zzYcolt(moJ40i3^e#uJEdS4x9Wj!Y5&i#S2++Qu_C{|4v^IrEIEFW05)Vg_lHcN(0 zb*F6mo+IqK**X?S52~DZWvr$#J?kozd+)FezFqbDcTpgi%Rp|uF~NNPxacTe925IX zQf`fT;)LI1xA+F!nH(pG7KygZXFfkW&6QE$VUecnM*zXF(P=h*!yEAJGW+ zY4)1={_K2_&s@b?|G)KI1>HK2;7sFcJAV_?t5bh|^NL3DCq>Yev-2oB4qzg3^5^k% zBI=-6dxYg9sPo3i$a!bvF;%$n{+?*Qzzlzr3Zlvp6SDskk}DVGkwo8QL*MJ$jqw6q z1v(QvDcF4H_I}|bz}r^uOgZ|$pos!uRF#jjfCLSd2A@xr0bE8MpcTZ15Cp-s@mWDC zKqEl2tFeZBrs%m6_a>QKOu1cAt)(~;n-S^!j0oqQU|mVj$CVm<zM)qFhKoFL)DjYL|Sjx_vmkO zzo%@JBDN}=a}MuN%1RV<7qS)U*lOp&TX$@1ZYmn~BmX{a^`T~6R3fm8YQ70V-NYPr zLTCc(esKgBE2K2f0)?=g)VY3bx32p<-RFp{+L43LuQWM(5#@d<_V%v^5KbSG#Vt~O z3-crWTVYny@4N0MfCt!?BA`LaL@6p<5HM(Bc0AOOR&(UQ0SN)X8DXT+fLF@AJUG2uzT{K)kBZ2a@&X}W94hP*QFqbrxr zHYw^bK>=@$x(6TF;PJSMLF5LuN|*F^_2H0hwU+IOvTcqZ2S!*fTsRlIy4}<})=Qv; zo?3@rbLO>OV^%(Db5t;ZSD!mA#k~s-v2esu1{P}y1>$If6#S;jj(pDa|aV9*(2=l$oOl0t)o$~9caAr*0^?W8`4ul| zx~`mY$Cmk;MgTyhRB8M0q?9F54Aa#@zEnc=?K{Q-mzPjCd ziH!u#U|Ut|Cu=2cZY&1stTqKORp}M86}8YXk^6r%w&XH~96S&&RY=O)SEKHg4d^pF z8gSRDACBe}L-WAH!)Kuaxg|P-kLJol;|c57Jk@4N zqt^vw%ynvy<7v{@*FDLtSiS6=HdhFGP}Y-Q;YKqjK#BQ146Sc? z@vd8js-C_1pN@KrwmvVPIE0x1LET!sm)%!2jC*8S-~E z+*Vla6Hk$p)l(4Du17YiaiI5AM7mxpNk>VAZewB3fPl7CO!nN0@RFfEG1T(j-;`~% zllv^_FOLgmy6nU+|0Xb5Pn~`ESIgS1*yYx1i*oT6=7$LZ3=YE%lk2Sy+e&+S^NO{78Hk_Wl*z8f3uu z2Qvd_@w+MZNi{V@(6>JBl7xn|^4AXKmf-z#V_6^w9k(@gf20I&|EV;>r<^VV!$GzU z3UDyUsY;{Cbr5XuW>x?_`}+#@JIc3w3T;~zjI?57@W3+r!TscE&dfSj5tHD1j))%e zv6N(H+C=g5O~u9fQ2ulf6Er0tq=U4l@kisJvbLy+U_ICT1;3lkZkOS3d!?Ryuq{u? 
zMfbhq3W}RRASKP8x4OoD*UTo(tL;WTBGR`VB1nsLkvW57YZEH+V(DQgvBlH+_@L{0 zv}`x^OR6hd)eP3?}FGr8%jIraVv)gDAs7TQ1|03qygQI?dEl7I8NW|psMF(8&Xt|+N>_x*!ME;S29Ib(K zt$6!3YcpF?sD~gdT9+p!)jTH;_)Wp}CG;}1-2kCFUaD3Jh6)=ETImAeDatozCY$Y` z#Nu7P&3%H;H*w83cMy(vFN*R!?)0lwST+{@6YtjVdRb1t1-xBnE|KRix;}iQG@Tfi zzD`vUWM)@X$kANPmV5lT^A}B|^ckFSze0^ZJvE733m1O3vRu|vPDTYV0$+MZZ@zzJ zL6P1w`-HnnXJRX}b$n6=nCygOCm6YWpFys!);2L+3)s-`7;k>V8{w6vm{>jQCpt40 z_QnOeI^Evd$Ke5yP!`Icx(42|@_TAcl!yh53$k-$za?OwPn9Pjg9K%drX%NR0*Hb8 zV!f`?wwJW}^P}yb^``wLt{t#_>W^9r#AS z*1NzOSIIr4a7}i{#7{S*lahXSYLeh|B${nL_24exlo%4wHBIeYM^Mh-KqDF1#2j zXXT;Dy7^kVYpGqdk>DL(j)QvR1Q8KkUy#AhKLpL!rDv`OcQ(s-08Q-24=yvE)qCQz z71%I^e{;H|rRp~~kn3{HK3BTBhfbwJvz^p&Wr#<#vaPMI&-Wiax-qOnAoviIJstLO zZ)*-xp{^ae!D#)fTEX)r~IG5s*St_J6JIh5>EE%0C{k_2@;MF`u>Hd zg?YYkYntDWwBID)xBRczx@3B7@hQtap}-oB9)Ulyt-hx~9xOLcz;FGmRMzI57`Hqr zLzpmBQhlk1;xocs!qD2cV?&GPJv_!@6hwLp&V?|r4&BfBU2mB&3{_te2>?3iGo{Dn z$ENpJz!HNv!6ZJmqC;}}Tra#$_F7*sUe)hfZ@kvxc<`gUaMMRy zbQT~fJvtKfB@N3pdV2wDy}~%*A6-5+EYOLuJxPHJ`%Qb(W%%tJ*IPdJVVIUy%7X)= z>4`#?>-jbgch?(NXLoHobBu7*F>V~8&3516c^o?w&D!gMt5*M)#cdiM(Tvv@W_>2( zmHXGVy1Fv3giJzG4ny=xGdf5H06`)F7-;z`IDS9Aa%=;-=j1e7=8!Q#BqWd}Ucse7 z#i-%{T1+#S80GF4F;rBduXj&dpE(Q8U4CBRpRj#uoI%4w&F%S@mtgMCwV`O(o%(Vz z1YTGLY!ZEy(JoDP9e#76a35W2m$hp7u7!9hQ`ZFC?3%k*iy?Ja$Vp`}gDi`7>q6t$Bkqd*lgIh+zvxcFs}8uWq{W-|V(t4r94 zX~!F2x$fgHts+n!U*T=~SoeJIFUEIGFkook&+5uq9K`048E$*~XpAs&lX?91zX zOY|3FW~0js;c;p~rXq#}FL06}=pcG{X#I7&Q)pU$bXJh?Y~2XdoXNTAed&^;%DP>Z zgBs7!S+!$fqDyVpD@qwX$H2nCivtofvS$C*k3o~i@5XPowSq}}cb>2uRxmr?#Zl-G zxqY^>_{H--?FkH)S8@Fht02xRxz-5QuWY=N*Op;0!EEHX(=3*FX1vAQIHF2rys+pZ z**z`-)UzfL+%%$7C()6~_`^Y}POzAkDLNvMy(EXcFw#gNJ>%9eQej_c$gKBVih6%J zdyLXEK&B2UBr0rauqgylivU3|a?^iZGP4i8aTU>rf>^_O!o}cyl8MX)TpAP(qyQ)t zg2B(`rl_Or$FGJQ1_4!KWzg}TYa#%uAe+9isLb79^u)2jvL5>_UZaDbY+xjqu2Hsh zaZJ-C1gG!1hnx&f3<3~C5~YHKMnx^8koeR?rjZVF{Sw7DmW0n#vR1G66C_QtmGg0H zysF!4hq1AWcGk1;_uE8{@VfuiSz9}iQ*_tcEc(5je6)R+eZN41kDjll&2Ri1EfxQQ z=_NP(G7qgTUauk_UNx)AYm}wC+?4VQqExYkE=jN-!|y;vE)r-|q$47_bl#zxv9SWg z#d9$vvPk>oZaINHlCX}owfyH)6(Dq`k~|_b}S0WB4S59y$H#!&4 zy*=Kwt=w#5#9KbaBx!l@urx4>+j_DqznBR*S58dU9$Ov^(oq(`{cHaphuXC_sKoj0 z!Reqfpeg6fz04@Uz3kvhLuPitv1wk%$=#i+?g4wDn?Q6-?RyL2tAW8ZgP-qTCwdfv z>h$BYI|)S3X-)N9)zOPVUm0G>53c=8`0h81dWN%vvgxMlYgph%;d{pO>1}SO(MpRD zE4S6!Sf0mZPPJzF#%k?;QBs29^W7oL8(hTGlkoe?*yy%l7Y_eb+$28OllXZg2C<#f z!O{eL&Z@89$4lKVzL;l%^=a$cW)B9Pd^A7Dd@ttuLhNvH$K;qc=E*^Q?Fi(tK_e`bH3H#DJ=alylyV z?fT)TLb#k4&voxfA=|&nwf5C29zQzd;hv-iPXi+XvdUU&HH2YA_C+Z|**uCf%17zsO-V1LIRO#j#GukuKRLJeB7vU_4J1GW#iT6t-na}n+nbu zE`R>RvwtZ&BjwXK-hhQ(S1`!=!dYkcyIj_>E0fPGFy$YndRpi1bK})l=cQOXWqR(z z;!b{}DJCGOtYFfvE=Eg9`@XmpKZ5(lzg$tV6HljWjMK2~UR7H&Q+^IEm-6p%@2X}A z^Bh4J*CKb8CMV^&@!I^4yXmRZr};zfIyQV?CQehYRS&y7?>$mVpt`lJDc1F5c9EAN^O0!=6Z`vDtxcBLg`s}G?vbVLQ% zS%?`_|DpSKa-?pYkj2sc(bFyf^3pUSt3%Mj=lcX$ChgZ(=Zs@@7ka;)a7!P4 zu`%*>j%ZBAR}?ue`}gn>7_a+PS*=dde^|Ro`x0&r3kbI{LqY^ed(xVri3J6^oBj+v zu-M)z_F}aL1JD2oAS)rNL3RmREc=%+_^i+(A3kMmS^0s}gvCASUJ`K}LbCKYLcSLS z>F_Bin=UW~Iw?wt0Q?iEs^VD ztJNEB+Hm|g6H7NFxWj8jBEWkSM{MLZ^l$+AhK{i?TXFh#4dtr&lh|#gKaIt{OR}_+ zwk}^Qn}%$3DEkp+x_t7tTOhwcu3C&xkg@wjAVQgr;cy=8i;o|!TMmha1YD(F?_=jh zOO7uTSe%SGx+RYTTYg%SG5F%420fI=D4uZ#Td(Quuv`wO(MzT3B3s?FR_o(>KckwH zZ>D$=>)hI?wcgaM8Ge&&?^X)lU-x-NAP?g}_27p_#@TO1_u-J6o8SN<0KxR(KR3Ee z17!B<#T!J;f_|#$eTDM{Q(gWWx;Un&iMp(PP+6=-P@NGElpGYiVg$XXAw= z$%a3w=Qv=<9Zizc{;C(RVbyo6>!meh+zcP0CT0mJJr zRVJ(S^?*e6{5KRO0R=bd!|Usppfv{;yms78!Bml;yoS zxp}Ji;N6d`*W(Z}v*!9=M5ez(mtDF#t#>P5eWdyz*%c-W+gdn(W0R~Zy|pK1j#b0e z#8=Ydof{q9ja4GE<{QM(Rw!{lhpt9?s>uTn;NQYp*V8mTl$cRWWlT;(`_Pn4r4wYw 
z+D6{{Qe?Li_Er_7>{^2o_hP`_FWu-29FM(|O-Mk;R(gR*4Ct9~8Iywrac)c5S|(t3 zjWGW$`o84{ZLRjf6xhuj>XO(BC$;a20}nuUStdbVm2vI4$L+i)lCQ!&5qGEz4MRbo zlD;ds2mTLDZxt0+*L00G78*jJae{=#C4t~BjazUF?(Xgy+@0X=?ykWJ?(Xiv{q*yG z=U-!A^%#5f#a?Sw)vTIx?x?tGyLulTzvON!oS?yKTQ6s6r@2?n9kf<0@ilJ9CSxXv zZHRxP_YyS%dVAiReeR{xR61MJ(rDu>EBE<`SA=zJ;P7$hUs#x>SJ1}sva$NgzI4O= z)3uu#>Bxglm*>MvvKO}^9!z)t-}dBaZAt2Bm~K@sE012|MOsbnJg>>802aW*iTc0x z%W0mgs&O66jM@z97v(P7le;ZxFXsY7?UApV?)27r$#OL}*A4BtV(i4|1hXTg1Bx^g zjEpBX`FHpC-mMiBY}e0+4?<>3fXT7ZxqWQ{oBRkgdTig+WLMAV74Pjfn2f;&WGtT- z4%$0S6+S@y)i-Rg|62X)D~|~Oi5~$PAJH-j2|1{ib8Fm4LuFr?v9jU4^ur*p{ z@LQNERc0_*Q(b45D-j@J@B0v$`opUeFXw2Vr|rLg9=zNk$$|V;l$GuJ&xJ#hme%rr zJibcIB11WMdEdJ_O;-wSP>He5hb;k|BGE`E4@ZYJ_#$K*n{%M=R0nj-2RvQQPAk#4 z?+i!7jCx6|t`(9EhPwv2)gkEWc%01}>^hW0G(Cu@g!aQ8b+jiW$}~Du^Sy>*X(c$ z2Kk4!XcXP~k1L1bp^mzhLm<@;JmL`1(I3XlB2$Ee4un-*ndVFc=r+fM^O6c7{mSsMF?bvfQ3+`!+hCM4~e#RcIc~7EZCR` z8yxQSa0@WMCfE6+>w)!y*a0x`zUT>VIPSpW2Evx@*@~y%Vz03Zx-WN^4GV}1j3lFK zG}lgcezj^ew8|Dts)%8suLx&=@0J>lB?E zOUFWeo@3{^;AduHDIP2j$tY?YNCJydNCXrJYDJw8{osG^8xx^4MMx=KB{L0xPn9v=L|+0fpw%>ztb+sNW# z7j_BldL<$PHiS1`IQLZyeJ93xw=EEGPU;3V5J!&e)qfXF+uJ=HN%qQ8JmI?8)jsHp zJMiOvrZ)^^R&z65YK^j1|4!9ERsT)2T9cyld_hf{vbEYLd!jPo2f+2UZ%;ON+x^*X zKkY9S({=eL@47jvRx7)Eqswmkvum6l2p$!@8WvgF5LyPjqObiA-y>G2NzJ6#eRvyII zhLROSP1p6I^OHy|OJ`{hP#yo=tdqHelrPag9`+92jmHP>O1Zdb5)@mVx$Un9;F$xCbwBt{;7aGw+LBlTq(=gh3><{kT9XvMXKdTXOdwv<;)~q#g*D`rtxL=Yu{y* z5rvHZirq>uUxtScqm1fZEj&-8hyJ_0cE1=C%K%=w$8f{I`F1O`qr+VCfM85N+w$ll z<9z3?e%ySNIrC3$=NFNN>Gw-*tjFr}ft|zy`TD^W5O!+XZD#Sx!vjItzH$LWAolym zl3$1cru}7A02DYTgmxZ}-5Li88@ea$O8GPZOl@T!3E+n>coYu1Mg#L5Cfj)mV)r@l zdD~o-b|^IH(M#ifd?{|S}HM82`?1$%Jk)?BLJ5G6!DPCD?xLb!#*NL}MLHXLw z)qLjft&DS|wIWaJGsXe-qobI>@{7 zx25Hr*w@H_TQi@&ejQi6ru@$-g|#&0Qy;fdK>J#~4}B`5k$Au#lZN+IE!A==0Kh5g zHyLTTAt42QEux!ZwgMEM|nrRFwa-ftT_-`A+=zgNXw0xf4k zw3nAXPqVCM+S8hhtWzkj?{*#K!-a+M2zJ?<3|f>21f;DNq@5K<(4vVqb!yX73fXOB zWpEqBg@>BK?|~UYBaLFf#>k^T#AT_)A9> z(jF9189jv-@CX8A*uP0ojya+Zf@Q2N0-uW4KwB3vA!PgugF_wQ(tyraW4tpW|^OKD;Y5B+P`rk!k<;cMO?ocLEqjt>0c52%Xm)UBO-6d)=!^li3 za8Gh95em7fMS917PnwSfVXQimPliImWY?2o;s^{fw7@OIGB zI%&PDJW-dgkxw9{?zmsq0LEml?k3S3w0H4S2E?oeki*lA-~SPdPhmD0?-(BIxc+p$v6LDr&xsOBc zfdoiF7%L<+olNfAr+NYVURF0dMQ)efOZEeUxf+th79L)=fit+TmI=bgDh1DoO`Sfr zC(){>9ebm!iD;e7%2| zrA*3z&FOul_l{20PY3AKZdRTPODjzGyP}uXrxVfN&gKGS7YMzb1fAjzOMkfus4oBTazcx7$PB+{xsh53%TvJcP^Uh?QrbLC5MelV~J0GAS zmRj0Q^`1PLT-=dO$K}n23xJd0pDx}qMDr<8Gdi1J?g%srz3tU5LV{mhA1UBEwW>FW zTl(~b)$KPcum1S_e6ZLbevXonEYhBT4%^2LL`f$1W~-ej&OoFZ(^B^KP^_}pY8HrW zuTqQ;i&mFqRS9jK+J)!w?ZBn}x1Y%x z>|=?DcN2@XH9p}gRdkx5$=hCC>g>Qjg-2+?fV4yr$7|a(%NlQI{=L142ToA zg$70r5GDZd1I5ZEDog@^sioxsp9)FrhIC=YNk70n%6Ar%tZ~2WbY6wDRz70)3m75- z9)7j5)6$nCQ0Tj3YZG3|K3;Patz7-NDWHrM>hI5N4#H=K0RjZwQdx6Eiu&(>&NOQt>?oRm+lzzcqZwr*h-(>{PuUC^9>{KTXU~r}8NB zY^$W!%!{&Qe{p$?X!|g4xU053C>`4$5~Tf@Ow%9}+g50?%rACOUxdf%498|J+1Zp5CP zbp3swWfwb9Q?hT+8c%JHMxEWcSf4@jEeZz~#%wa9YMuG$ye5{QT_&UJsc4UYjG`i@ zCqqMoWAi1?B#jb_n*VVXqpWW;FDr>HB>L?+l1u_SbDoOt0XronBeROnXp`?qrI@7v? zwBFB+Bvq^nC3R8NU^#AiJ~FE<&P{>bi|Ft#-m6p3;8>1 zhs;R^+NVH9RbGDMQz3ru`+xpA-tT3Me$!hOGyI)zh48XYU6HuRxnRwif@dgZ?$d7n zs8i@IiuAbgDW1xSw;*nrWH{15pX&bDL2+XJgTaI*Ul2wuJG+ULap5L;bTr=MdEW`S zK=CDIREX&-ZVedk8wvv2?!npE_q1Ry~;(EUHmu`nRX`Gxr_5#|ca#1QGr@Qse4Ntypp(LVeMBV6L! 
zdhUe5YuT)v;!iG)VPYz`q^tBDl~IijDL&#GKVmHARBN>W2&h2Rx4m6mQcMwoTtF)_ zE=)wy<-7MF@taYnUKV8gFY=Ex8F^T4iK29OwG39EZ9f5IBxK~1=$<__`1g#;b_QCErjM#ySuyv?sys)?)4=O_mQ})A8DMyj;Q+pC5)r9!V`4t43 z!jucz6&QTGG8ub6o5T_pz|CHLytq;N$N<^oYwn=Jry6{s>_%7(J)kNpCy@S~0 z8I;8A%m20&vCx^J+Hz}LmjK_Q&U@21`rP$2_TBZ^E~2ih^IwZK{(Tm{N>s>9vUTTq z2^tbG1Xwix1v!@`Td`uMeq7>3O0eE!BtWmqo42UfIsS@@|Ai52KC8{jGEl2kk21<5 zzj_j_`>UFYUhgJ;Lv?kD4!&D@BoD3v%h+d(f)Ovp=6`y(MN z|5{5K%}~vMyK;a&`2O*`JRxw|SSXn7CuI7|&X(vLaTEtqEMs;zD%qb9iW`kJESk|g z1%W5D?3CT@f0-P-wNp_YxNzIV|8Bmg(&BG6D&z9`%VCqUTIT|nb+JX<)v*^Y+hq8q z#pbfw)urE@L@>lNx8LxjFMmEXwvO~`{O&cyqi02YTn;kXNO?t?&C6~V!qz0*ui#FO z{nGG@qlTKA5>B8vFd6$ZryMeD4m`RqIQYRe*g9!QCORkh2hRCSTbj5pDF7g_Nro^ zg3%68PUp@$J&J_gcvgkwv3$~5Pi9IsJ~+-&K%J?^xV~Jv$8iEpwK_0h;N4D=*52KA zUOQSy_=_-RN=iyfG0AaZFd%`nBUh{CReU~FB&fn2kbLVb_7UQsgZJz6`3jA?Fi9+t z)76}_jq^6{aT+A7NnIKF_PwXkgZCqZss&74N_3i}RGT;II77PKp%}IB?5n9W`~^?j z;Gn73)IK`Sm=&&Z-fR)NQ--EHD@LPbrVp%7N?cY#1NrA>tXb_}hObo1!)S#{;|00O zrI|VzOU6&kaBv@)3{Gk+|Ln%$J(Bg_l>_HTX?|%~^Qq^NkBKU$V*%Jqa2#dJ7HP%H zWcRco-G;6xpR=kRr$6Zp_7*DMjIhmw_8X06 zV+3zszSJX}IDc^3i71c73FvzsFJAQ?sB2=)dcYGy6_-iEh8yNcT{|zN61%ZHu%{*& zdvNRDCkmFSz!Iwer(AhRuoR&j)^=} zb+}ORgkeKKkmleI8UB@_(NV953wW^_E=+ljX1i;x6rZb?}H_<5bu%XW0WN&5AsMQ6XW z!F8(*vnp$`;d!YOyMvwkOueca@FsiFXl~^OZzUJ}QpXQ@~#U)paae!7- zCTxLh*aIbBzK*u8hqndO+!@?N@ZbYacG@NwJ_dt)RhPaf`$S-Bi*ahUhk^PSHV^#d z1oUO*J~RkZy&ZDT4xq-^VllOc&DDcN@Z-Xa!cGO`j;4@i$A6Wm8xN>ghC$)odvmuqvSuWX`URFlb?O>UCjy(zT zP2Mf7l)p@@xlHVLFI{j{8crJd(AAkTtV9dhLz;;~4ff~%mT|F`wp*O_toqA|@TS_f zyV#~_@KwpL-o?x-uj^;Dy}BZB{u>KyqeVMO#JQiJTjT2DX!=*n{0}ZtUnZ8D3^^Ha zdCq!EJYla#BDJmj%Juqo@Tzz96gJoF1dr&tj5AGJ^vW1YH@TI>Fmc z$9$Uv2*B7qp>uFhn2bZE~T7#JvCv@dqe?63B=d{!geZ2C($#ABu%ALGfFJHe@$HYO58 zDq*9^d*s(6v|vQ|=C+db*yi%(J1vtN9U;aXSYerktyAmQZtT&SSV5EQjq5}so6C4C z{MIiEn^ueNO_4U2t`5T~W(N}&88cTk_cj4K4T@%7eclGBXMNy2Ay+fa^-#K%1}fIB zljY)JIKewhsg{Rwslm>F=O`R0bXZcn)zg(9x$xV^AF2q*5vK5h!}8|O`SpL6+>n#M zyq%Ey*+mbLl>5SRF^BfTWA(vGB=%v*lW7)GBNu95N$KzUqTNwSS^%&WguPfax4*~j z;-nst=<_HV_D`cmsJ=4B*iMn;Y;QxT z#lNAzHji0GyMg|2%^{3St6_vC3=BqaNW>X-lh+xFn+saNX2VreOWW~1zL|VQyHMe! 
z2~yncR+bkCikQ4Gsj~K-Xu$eI-;15H>b9|q_@%iEUWhUh79LhuP)j%iW>2}ONShG4 zk=spdPdq+kRp`P94wVK6sa7uX(!r^dpS#z#I?I?0#c6R0X7Ht=SV2`wv+HvdSl@c` zOPRIDztqq-h>>j^M*z!~f2)DyqX_Tjdq)VU9IeQaSiJ)Cg_YG_Rm)W^0Z+$O=v*@% zMD|6Zu_s;LiwTTIlfybP16Q_s0o{JCMObI0wRuTnYI}nPX~%jv@Yzl`Sjj~$ZN_y; zHs$4WdL>ykrHw@Cpd0~xrC{SCv?U&v+pO^%mrT=8*AaSH_)v;rWiK8W02C3pD~kE% z2*oe|Zx(=eD#g$DRulDd!S2_W*Udo)I3y`w<4qGD0HPsD*DWa8+YR$w^HQ$hK3-m2 ze983mc%&e}1~MhG?>En7Fd17-F^{8rjBX?zyo^LJ)1}*>2u%rQ?Op z7D35a({z90BXuE_o8ff20R4p%YTvoiX|O!t)Z!Nh-*m9_wX(fL?BYp*)5_A49+kQv zqGo?ARUV8Y9V(eRATLY{5&)eyJN?6y<+lk6<1T2z3Jvw;Vm1OH!Jwv-^bPdmq<#q5 zv!ci+2erHR=37)|SlSDQb|VY`nIVr4Gl4qZuVv#-3FVy;Dl|BQ_*!q^^l}F>K)py= zW0ubyg?xG|QdqYkYzltIV{A0)7oKADStKqm?RWsTFewQ0sH%%z{}9FW@UjmJ5y?bJ zSb)Lq>K``RnhRY)x3nTojzHWuxv&p#TM4DD{IJk^c|)_A*>aYey%6@728D|RuNU_a zmuYMl+>DE%%!a(tqA9B6?CYM7-!1T1DL5F z9C4%lYtz2?y?O7S#dqG(F-)n^xf3<+MgH5AShUVo>xC`MI|c85CBlGiNgr3qdwCGY zC3Kp&?Z8|ks=kJ^%1&y#LrWd1H~vR<2dAl~L<|V2dw58$YJGZSRJ9uC`F9p{TjyfQ ztFH8$0GQ4eFk8g6OiAJNLxO0d<+`9^`GxNzUlvJ#qjkb^i~RB0>xWJbpLPGW72=nx zwqTQ&NqSDVAHgzY%lYh4QPP+0{pJ>BLapB5?>58hyeH9wnJq>$HTwJ%hLAcOG&CZ{ zVxzKJnFMG}$7Tc)>~$|zt0M=e!-D;Dr!zdz*e#}h7Up{#7_v09S-*ZI4M{LpXV~ek zQ8}UmbeFc7FI28=OA`PSx-zq^lVuHrcl^oapiPYwg<-MaY% zDK|f=)thDE%*j2KLxETRfJBX-I=Tf!l;n|N#oPTLmb_qq0TRgeFFvA{D779DfHM-5 zw+B=7K=8=vbY@sW*}KDtOP1tc zwhV7v-VZ0rVk9ez|1E$oIZ#WHz(v~X<@9$=s@^UloSL@}WFZ{N)Mt?%0Lf#BsDXIG z@Xs7YIn!(Clcj2;3`r-M7_a`ESTlTHNz7=t5*HN}c&jWg4=#G&L_-v6J&GZh$6Bq; zm#k%7XMrL?kOkOQRKmQ~N>TYKC?e?7W$F0*fd&c;#zKz8~_1*+qiIcJKit-7x?KxJ#C4Vi@x7q2U3=+#XPIH%z&8 zT#y7&_7EW3!kFPuE;qtfj*JBbj6n+fI|3vc6V64ArIgX?#8R@$RBUYj0q7e5#bo^< zu^&UQ9+5s$fhWzs9?@f%lZeerhu6~}A1R+5%hPo2-R}n0WPBZs1=x;>jT)~rX1^p^ z4U+zbCxWfFkmFV!$_aVJAEe_<(9mleV9aL@)-;cPwcUFg8Ps0>)?4c zOInP^=NQ9-XbR%R?z}E+7Umi^4=3`T^C?8$&oVC9E{3VC@_+94e1Ats5X}hBeK@=& z8@M^q6rxk0uh1AgcIs>;k+ZkAqI%Q$)2YtaH1qRed?xGVX4Qn>B1pb)#V&IvLGKu4LlnDG#xK{^xcL*1rOzeBX3ha$9KGHHg~MK0%|*Y)%uyE1gpS> zfq_D2+i@ZpL`0Au=4JrI4#Zq%FFf4SLZi5K%$O{n;h_{foHDFC-o7EK2X!&rz zq(OU1Hyg1UXPd5p;L93J8oUVT&Q3LAT$&}bd2go1tMqUw8Ugu^)5&bK)=-M)Uhamr1|7SLnRE`A2l|^_SQsyHIF0I+OYI6g{kx2cwYJLnR$Um!UO^M zLl7X~Zge0>5Xo$f#ps9tP1wVHs*Z+nAUO%khv93H{;|v_65!Av+9`&-;kT4mrlWUU z0QJ|y{d6%89@g|UjDNekQ?fE&UKgMF`S_ja88Fdh3sbUU^pVhWJC4)PH2KYJoAAe9 zF1QCe@5b7Z7Xn%1 zzCx=Jp**6uWMr{xhYq#hv@CxXXY&rc=HE>v6m zn%29DuG&n8O#;14Pq6D#UjQCgM=y< zi*W#|xFm1Li*PJ32f*xAP)($D`gsunrFz{?AwjL1J&Q2%umIQ`K|OgSSTG`FjHFaG z694MY%UnbV#hajfq!Wj7UjFZq{l%|Al7MuQSVQSxgF)B`JNay5P8XCo{BKviML72P z;*;f2EgVn?>mYb5_e^fs(GSV}xFPfo1FK7MQnizG8ps*xyfLp3?g#GwMz zO3GAzUWroI`L>jXebtSAL^F@8GVE;hcK7=9zP5-6m=|*w22Aoz?ZjBUzPcEVzWu#7 z`Bpyh8L7os+(q4rAdjp1;r2P|0Gn>H`dM5>Tbv!lNdikT$w6}-)_Io4TR9_Z$5r`x z_BK^OOFCX!qn?kMn;*mmD)%(ocA^cfnG$NP{}ECtCHt2y>&0ud^M~ZgzroKiF$PWZ zX3C#9Ud}#>IeZ*l$J@zkmnN89o_Rg3Q04PDN$e_Lt9ds)zj_gbd{rO>Qqb5?lR*=;$=OLYA0=fJf5!N_@zZmGBD#-u4)!br6Pqzz~kBTvZ)kep~sMs!ji%Yf(^m;S_{f4 zsf8tyk_;eQ?hbshF={N~tYIuLdb5oBfC{E$z59-CsOJMp?Fdzy4hvmnN zaDI@UKbg8}f^G<>a#3NR2n>Q?ll77!A>RCCD6Dy*z?;O5X}| zlVRa%oQ}~v@~SGNbV`$b{kRgHRZsnP_I#U&sJWh_m*LTiVwT9_q6$C8SjX@iV~^)I zF$sE`&_9WkAdKlQwU=!#PV52C+Wi;`F`Sdwx?h%Dek*M+SNOH2gZn1Fa zfX$5Os){TmSqhqYm$jI>bONZgTW|i8OpzV;?7E3`lvx63rZ_VoF{fdHuq;PFQ|VVT z*ZiKm;ibqN?6TB6Xr7q~UNy&RMr?@yQI&tGJllk|}oDc#0KD3;kabvbdOz z2J)xV$S=VHp#b86)uv;2pes*in_DY`Cs-2*r>_LO*rZ{VC&yd`%Q&5X0G^{l4D}9R z=re#$@dRiat(YVdWeoYa+nmlP^zma(3^gNIP{~orIhGh4lnoRHLu8#)R9b#~^FbDl z#VE9>;hUodCc=lq2M<;u@Rt6n)>rnA8GtxhuC^Qah;me>9Ag;D=c6up-VM{Vq3(9v zOiNJ=Mjv~>UWvW4S&FkK+J~CLC^BH{t-s7={!aK6jLMXf3(2XsGD=~*CRHW3AQ9W| z;&?l)*l92DQMX*PcU{h4xO!A7DP>G!B71)DX8%whqS^kkaz0&bG5**tJt$D|z+{S{ 
zdM}mTDA5^Wzngiwoj`43M?pmi&|Kzfd1=a9rc_NLg8^IqE+wxln@yYE4vV@~+_F#< z(tbZ@2NTB3%3gdFM2gA(X_L@~)A0Ad9Q;<~Wa8kbj_?gXs#Q9nF$$pFzMuWI7H+Id zn{GC^<#{A!lx-iIve9djD5fTNh5y! z*MHw7(oeyA)5mP{elj0rW|!{o_ruV5MrniFhR24fkQx?77ezXXFx2lSmVXv*IcJPr zLP#l?_5%iBlrXfByOy+tVoMjhvksZ%l2bTmp19e>ij%3;OA9`)z@yHB=?9J(QLaJd z;q0NG)Wpy%rg8(f{aTMdbIRl2oB+*@imd~SXr;x@Q(bEavu4;$)+4tY1sw)Jv`Mww+J+?p1gIv?0{KcpwxOFb5C|$fIufYt=Ba+6sEc$12VhZLRt?N-{56 ztWK5^l@mxG(ghvg({?kp$=S3PpJwCzcb$ro;h|V744+m9+X~7m+)||6G31lh;oap~ zPc;VU_*Pd`v&G;JnFiHdF5zj1LG!VB~Dcvr)%US z+jshFC@02x9RA$3M!ht=x0+$Qi6ULvi~=5ia$Yi}^;y42@LMG-tT+`4b9%6HCQlvU z()agFW_#$+OWT!F(M`Kkd^(w`%rvg6sc8YKdGJ)pu>-pcN+dQeINK?@7MJ;Q-2SjB zV@V>SIPAau)e0==P~^$w=oI;*bU{O8a6rtb{kEb!`0rmY7oa}+mZDYiLhv;ywjTtO zZK6-Q1WL`_;MIG<8YgJZkubHN_05dodO;BsL)EWlPuziO8QKs|lKs`dzw zvpfXo+I~yxl8#{CQ#x?97Ei1B&P*nkN{*8!p3H6*vtK4PWAgtFusd9fA_8m#DXi|# z;A{yeTK{dWMq{B90XR7jkloR@yjM3$=b9^SzI-KwL7m-E4THw7uP?xhlSHbP%omW3 zksyN96B9zM085Dh#z-aX2E>F?f`x>^0UD$RpW-Y>a4q2MB_P><$xZ!>6+nNpHP{3U zAnk>Ezx(?K>#m99ht&jl4NZOA9tzrQ+G;g|DYGr)`HJ)c$g^eCSRCQtd$N0@gigF>QWf#w1q8tPO5uIe?K9yS>O-+LtWkdS$bZ;RP-PBNRsLKB3Un-tItT?g zdgzT$KL=CHE$TO3iN6u)lz+$(?9+(^6+6Z0@(Tm@ORDm^xrZrx=C_C z*|PFi=-ExLT_j_5zugMs8%y?HayA+qngV@~j!yrhAyhj3;JuolhlhA3p5q*LKLam zMeR!fB0I)A-r_St-fQen&W@xvLT8EDUT3n<5(7qT+-#46h`(RBxXr|~*95&)7r*#& z<0LgIWpcG2^kGWKslGH9+b2i|!UfCfSbFso{it3SWgQqD*yLSEAZ~v#Jx{Dlf`zyz zc$`S*0d&PWyIukiTIKJ~JbtyUFL$~vRsMO4yOBLes8*y^FZ{NAsjz|SQ^&jiUnGF8 z1Jo4Bz@==;7P!AeEa0N-;=U|&Tmq9r@~=8vlyHc&zP0?AN#EB~PcQL)J7pI$AQB4O zQxjq#ZEtU5t!_nG#Z(AIhz%KiY5BV$0t0|g2?6L~Nq@)%y5SpPkm3W9LP`Pz(1!o{ z0mxlf11UCRw>CR}Ac~|UnEI=O^gHor*!jueLlNz>1>i0TXq9{5bpa^80P$SK55UlF z_!w-G;ASG!@krPzYA|wJ-PJr5|IuW{7^aKXiX1wH1eSD+2ryM}D4OpV-p@2$N){Bz0t-O2@Bdp+H+VxFt8(8+eh}jp9EJ97C*P-qM!#Zd>sc?h zvf(E2f1j6)4d(+{)5fE#v9qY9pr-^i#HeZpn*jOO%;(!%_B#IeZ7DxFKjZ}WA2O^& zA|mAe7PHFtjAXlUMBgH)X{k#F>%u9u-($>zu#jTZu#@2B;IW#`B$eQYQBLq<5lG$9 zJ%OYE5eQf_ZcoQYi^J!R2N4^I;+rKcs>`A-7zBmwXCZdtv1=Y@&`3!kWVW0gIFTGTpLO@`OVrg%tsD$N^|a$DPX9*V@g}!cWX1XQnYQS)?}_ zEo8mzCY5+(RDVz#%Ed?}e*!>8{#gM0e&P)O?`q(k*-Fu7ad1I{A3RgUF_*!jfGoN6yU^7fxx~1>kY2f-vEb|k* zUm=xiqY2p`IajxD&>bEpo~;ZhLUH6XEF$uT5lDg++&OmR(4A0P+FChU ztH}-*aH|*U5EbI|^A9ni^Z{!BRI|j&sy|OMyPqHI=7)gtQ=Zu4FHM6Cj^m-j9749O zxzHZCkDaEaC5KXO+Um~jQTm5ZU$9S@Bjn_rfg$AKOkffSfW|kQ*5B+feX{uBcXi?8 zFU>d#cql1VA6DQG3x$NUo0hS-$xsHL&QE3{Ygq|u%T&rlLkU~CDLtHE0ozud%PX4LX7S%$ExGzukkrn`()nl zc2rqQp=oSM#qD`2*S6@u*v8FMX{<;;-KWOgr%ww`L)3coCvP%KWO8VsLBGCddAa33G#M#Jq{wmT)SR3ZePcmQGT`+@{_U- z)>ms>nKz7?et{V02L#{bxLW}AL-Xj-e9L*b#GUA{2!fk-6I_p)9Ed@Hj)vxHfNlKm zuszpWx=^*SijtVOoqSE9o2GUD%1^tI!b%uem~3$%s0*Db23h#i1G;>Y5wTb>wncY| z-)xL9(q|k5JYPWs0d^5mPDz#^;hX>vs$L`vy4EbFXbgo!#4WtMm_I~LpBh9BeBiuB z7}J$UPZlmZykyYEAFvvZ?SBJ{+%y|t>98PPlErG7YuhoB%1N+sVp8Xj0%Dlv5a>8D z{9s{WTOisyCW2?$FNz-;-X?6?P_dQD{YrzR2gCKhycip{1sx6Z6_FwV1!hnVPgib( zqi`&giLm!yWEB@)%#Ks#t>U@!g}H@$k>*gGSBam@*LvjWq~dH&lhn(uOn!|nlk*cv zM8W%348HF0Kb&0;zbYTyxX8bd7%UQW9V_#|XyMO^e9ceb<8AfmD?c8YCBdX5?MNFK z+sJTQ*0Vz$J_wOZK2xEnB3Cg8{x*?@NqY0Ku0(A8!Fo&aJyT3#>Q4LpxHE+Q;}e~CE^phc53Ek9 z7hh@J9CpsBJQ~6kNLQnI%7B2ZPZx^bqsn6hl|v(lzGzM7K2Ey076u9Gt<`_yCX>>8 z@2=#bp2;Uqdm}I?@*NCbpJ3LgXC1+Yf@QNz&{Wbh?8WgSD!OtJ7VsQKiu?0_#o9|} z4Dve%6qJx6QUF4m@Gi9ab_~Ueen3(xEDMZYyG#Kf4K3|ISDxB$8Zx4Sz=C@1dT~H? 
zWG#(pnzW3Zg%xsqQ7;d{Ofj`oM3?FfD~yT7kw-eLABZ#bk@mrCAg|1a5+g)|;oMP^ zk>&AtKZ~kc1C6zI?V7Gii~gXGn@_M^w+*2lNL(vifE8`agEH^Ii^{plhiZ31JRH&lX}ZY(`aQLoo6ATC5V zR1lO7#)8!nYB9FZtQqJOhGm5hVw6MWfo6bK2)3evp!|T9CaZZZf@zgpq5nOKTy<{V zHJ@3}>&)(dw`PHxTq(1Uhagf3yg2WE$6$k5r%ot&VPq_b!P%NFCZV0BHcWr{IXYR? zQ$tw=zy9^zzs06~Cx^%ih6DV6V>};>*k<=5u-sm*7s=9BtSvUGz!!RI{}n^Jq7jMu zr61^YTtij?{ULv*bWiZJc?(n!5fM=;z>tG+C^&;#!FmZaa6qN+xzXm~5-qYSjhN1D zrL3g%dI8KQ$u*V#zhInv1t9e;*k$N~4{*Ml_l4%?=d;_%xMV9C`2S}B`bBRAAyhcV6OX zdrmYnP}G#X$rbB#Pmm`6D%(~H$B6TNIpKF-yH2yUgiw4wdCi|HvZ1JL=&9WoHHJP6d0o9vaZXugoWN ziwENZU8jbOK+efV9F2OCoS5rqQ!5DTz@&sYTZHj()~IsYMG}gWW=_KhFplJe_V;J6 zUD;5ODOG}#V8p3-GPI)kY)cs>rP4@D6(N|==2{;R$8;%46L`us!WBb(c`?N6=5rL@pWVm{4`7 z1V`cotZ!81Zpg9dCWvTK?@-Mt=cJRNAaJ_lGc2>zL{NqZH6f;YT&n-cz@89M5i@R2 z*+ILb3D1byV%}2CP zU;pHf(7!eePjWY^b&BS*$dn&sk~L3{Hk^FPKCWepVk4<_{`YWf5ly*KL%Gr!B3j>F zoTvBrrAGEY-(iS>;cmV2rqAub z9aGW0oNekBXCHx0mz>Gdty)S~)5oR70!VfDuW$F~FSUs-)xzgckfb>h$M)@$X5Pn% zvz37VP7!>KTdz`>Vz+L8-h?Pz3x4fa1p~>{46Q0MeJ!o4*LogQFxg&@3pD{nwyUpq z*?8Ix{2S)KfLy6F91@g*gutWOm6Xz#Guve3>bR>GT7DGGS#19EpBd2eB+bS= zxH>qT#*m3j0+vTL#?3g-6fHC9{V+|ed*D~=gcxBD77@?i;lA-d$O?^hqzIIc^gXJl zxW(JM@uW53jSKvJZwr6GcQ_LDO^q@#7ZKjVJ?$vU9*Ng%A-KSk9cCUD3eOTaqN#ecWvNjdOA9E8dZ{z1RP&C7#r0ay3u4B3u&RC z0$N@>q%X6$#az-!BpcT8z_ z%ms)o4-?~U?_#)2sj3J1t~#}l1}w5sFHDcTx)8)H9#gc4X8YxXa zuiYu(J*9!0YD+MhF^q*I>V>l{&U+j>DT6NlQn?<+v|t6|iAHjy9kU-|3kgBo2tR@g zLyvRuLJCK#=#|Yrxne5kt(Rv(XwL3wlGIhh^Odpp)oqMB|NGywV&v<$uYm_UPudbr znQtI00%V2vDTnq;?hDFwEK#j3TYcSCURRCnC5m>!0ywnxUfhe1Z<;1r{EY}omRk-W zK^g>qyB@z)tT{0MN)LE;ATSDCgEOVL4Z=&dPd&ds3{o*|Asm_6dyFFA`n}ioapM!X zW!&WPJwzkG{nD`TXe3mB6_U+wA)9Y`TyOt5$(ZM-Q>_2p?p#H_`Qr`2qH#d~aLmSZ z!ikuT_l{Fe#!OS^ZO{59jN42CYq!<&xBox2(QV^U=mOJE@E9^^`cI!=uG(QO0Q9># z{(n~h?ddBH1JLm#B`1pkd|%9-wyy50he;!+(SAX&^!MDMBjE8?z{kaPKU~I6;&`i~;;_UG+$4`@?KLbguRP6D1DoLYOQ*+j56}N%ijXQQ zD%i$^E2QB{-lkt3WIB2N2gXF^OZ&sCtE(lsj;8BBia<4I?I7%?tgI|0CAA*D3buJ_ zS%P(ud!f?bVsymAFdY>)%BnBlDEexd4QaH=qaeHXYb0VxK1YgI7t-LWy}1qQ!RNo_ z`q9lLp1qvld|)1hbVu75|~o6lyXq(>oJq2rh3ZVyZZ`JEA}cV^+KDba26?x{=o>D0}F6o@8DBGu8x`N)FvA#MVmypN9HZS>+xb6&|qIM<(lIE)Rz>;Cbx zWdz1Z$Q8V#%URm_bsj{z;fI?8JPjmH9W&8nMM+><{trk*kYCw>0|NuE8);ygQX`b| z1VzxRQ;}tJB`hrLmHslgx;o1*TbCT8G?bp6?n1M=`t8I=L8UzMRm+Y;z;3i$9jj>v z#M0o|aO}64xF33SmCXsvSuL(V!G!)5xYiOG!wv-5Z+>oSfsOZRqd&S4kY>7s2)sHu zs)zVfl9OL8jm8Cl_ge53!!jbmjfO|TU}3x9 zQc5R}CKF}K^~^$rQY zfrCI{Ec6jq-6XJ*XawOvk8XP$OWFmA0|p~W3=&J>5mqtG1ydR#myLMQJFCRo)#DaNb1>E%~bvk%h`{UQp}lG zDGvsowFiQuy&zL)_~FV*ich`sBP>Fa3xf1c!@`B~>ocYX-D%t~)!C3?a+ZOi#Y3_c z5DulLWH>hb>@4UkuS3Pvru-G8umreg{V#=7JZMMCN-CPCTk(%B11TOqlMD%*qQz+LP+3Le0{6r8 z1;l0C4R8o=aaBX1fN{!|$Mtwwb+zN8HJG*T#=K^o%&{x*^YXg;ZUG1V3uyNvMf!lm z;|yp=>nubn44_}(E?=X*B2vwjsGa{^6-RjW-%0>}I@uc+Jslk}Ain|5LSH?2C2et6 zVB}B|x|C&<&&z@U^F6qfc;S6tDcnPGcIFX!^F+`o zDNXbnojN4&s{+I8f%tDEyqHL)iF+u4hEoK2gVIP-8hh zHWXlg#3i17XTcv9g-dBCW7B)I)sJ zgN{MU309O#!6~DC)ck_ZSG-YhsjL}GPIfZZC%lqk$I9%=2DE=cCY1|^fh2NrLs2xuY352-^;ni>Gz{2F248MAbvQW%$# znz{u7s-}R`4%AJlHn*Vg3B1bOdfRZ*zV++~YMRqQqZgNV&|-G(O`BHs=Oj|C%|`b1-T2S;%fyLA1Hvi_U#I zhv7INz}*ydI|y={3laD4okhz;7cdd`94>BTSqx}led%DaHWB%|U2QzJWn6G#z>4jA zSJynIJhwy^5z2ivNw#ABr-I|b&Bw(3=wt404F%nfj^jPH6*1+}>WM&A$6p3I*-uI+9uzC%%>1Sb$${49Aq8U8j-~a9lp9*3-73^So zgJOM?2fxakO!(YiZjHV9-uY>`I9f+W$h7ml-0@;n`jQG?u)$3X84J)oOHz3JOUs(o zEGz+*vwHQg$93GJB@)2%uJ z6ImbJ5H3Uyo2!&Txk&WE@<^ZSUmr)j16?-D^x{Ep44wqpa`$wq1;-{`SJnoFeR5jx zCSHbmD6F2DW7aXy&i!5->-S3c>$#oqnE6J{4n=LAxLG{Sm99%JKa`&STUYut={erw zU&*3S?H{P78S{I1F&3lqdcGWuf=sZp|1YDe+GsrZDz-zmzKE?rX%9>&PPw&)pwfBu zf&mYCgM+p*@U3hBr=T$t@(RB6@GGqpOt2DHPeMh3D&J*o55DI9`QPiK2AFkzS@y`b 
z?JNN7x#&y+bNqjs9qDybmKosX9>&P8X1t4?{=E-1%_iibnB7}bLaCa7veGgs z;qdYKW(lN&Yz)9Xk)#6!Vvd+Ehz*!_nZpRUfPrKbgB*oMAV#*9mmiG4V*vvK$L719 z+pb$Nfo~!rBEA5F4&pdWM0hA}jd%tDsI@i5JOLOCpGQDK7k%ri@7MaH?junj-)H)X z+sCR@3i}CddP-r76m=7{g2w_c&(W@9o6sy5ExGsBT+&d4+x2ubk<3a|;U}DZYK=iw z;&?dhgzFq6wRYb`w@rfDa+Z;UcYol0NIpp}lca*nH)rXv9qX0!8KfgfVWy;w$F2_C z7Qy+^%%FkMbXwhV+l7$1{Sw=L;JEqkh_Bo4VV4PqqTOP=KK8bwHQ=S)sq0VlQR?-P z#NE^z-R_$ytJ{~$$zbx%6Miy8_^a87sSzgJ7vHV(ITYo})MtMhk}0x7(?9d=frc%= zGXj3ERyeKK`M(7IyZ+`vfckJ6sB$Z_+V%~}R{F!Qzwu)2cad*K#RhE4^EK|39|`za zO)@mQ(QwZ5V#e#mE{|H{&G){40oF@%D17#fY<;sY+zOUHqAyv^M;l>u>7DchruE%^ zhqDypZO*^VPC=F*d@DU>6t6dLwgNG6YwyUnS_w9{QPehPhwE%FWPo}+ufJtpDma~Cabm6VGkE+AvZx%cWe#HE#WaZUWIP-PC>Jl?;f4_=VW4+`QXuQ_F z6-4o@s8{z{&PkrF9XYyF$=~xqh>2wJw}{X0&ZNq$^{hDS>(6UWpYYf-O>|R4j(%mv zlHadU+)V{!_;Svk4BYQ@ciUv~{Pyic9)0=adZ&g_C((`Nd#pwx;6x~9#oc<*QNXgq z9`La84eY=+e=40&_%Swl`ft^vyx|Z%oWrB&az9s-H@>ZsGHu5vj>eAFgGQ%H*dEWE z%w_-J^q;AdfKozPSWbMpX;!4*h#o{m!m6A2cF}-I&()r<`u-@33@X=mBFS z(C1G!gmkFY|2PqL0*+3tD2CQrThIkrzVqpjPaxp7j~(6BI5OESA<+tmgg5$P9PY;jWd z03$9l2M1v1dV%f!>-8g0O>#>&uP_Thg*qX;S&7hOj`#8u0eJ$iNWidZYU5X`#Y7sH z1^9klnpc~|Kxh>R_Gz>?1z%+CA>ozawl}yY9J2=>_X?2$1nO#p`O##SwVace``njT zdMAVGmZ=@$eD@|Y%jT-2WN#@wDK#Ipv#N`f+(1gW=it6#rcZbRgpiayGucb76s9^n z^VGfkY;*JB(w;qo+?}-BQ8}I(4>1fz)eHx*_!@_Pb2W}M4>t&x`Tal!V~1+xNKJY` zS^!p13bq?D8pf!~d4w7|S=khGyQdxH5P9a>`6&iSJRx&z(XiRn#vktT>q|>tik*AE zm~a_}hR|dgD*f>gZD_XujmynP-?5JE`y~nq>}Yk?_1K$kV}+6Nmn$22^O~1`_sL~(>3%Yz$zO*Z6mmDw^h9=kT?Zah3{L+np?nYh z?lKqQaijk{c+-7bt!{^DP=4GfEH)f}7X2>XtO-@Y&Wxf#W;2ASn1d$irk@4#dT}w} z=ZY(=Z&SPj%Mixz6C(A{_21gu;Q{rc^%l|kNqB4jMwU14TaHxc$!d`FSUAy4#lxX#SO##GR5|qwO+u6eVv& z7`k8L{22Wv4J-`_En>U>Wamyi8Cn?QmJ};uEAQ&ErCg%igk295)^~L2M0T7lUO9DK zMJcS(gPT@~rG!>NQ~9;ednL`fSAl8rK8PA{GyT%Pqms$r30Q+b0ajg6;YI@7?ype^ zW9;p$G&FneDy#9p2{7nzzEGjJ{#VsC4r(1l4SeMvXpE??sybcvUeM9&1SmFu=!_JJ z(L4rTUF&@P~XE_t>2Kw$F!${*Ih zvt;6XdXPD#&35%2d7+xOawMelA5S#{C3KgiOz*{|7A#3`o;3MAZ2KaPvf4>ZwRV;E8IF?Mr z)_1pV@MJwkNlKw8((n3aDsn&;XHVtITTt^$0cC1~SaBkSSMVuVCUfy!B{8}F^D0G1 zn}dE5{C8-aZi;UooAMAXiJnnLMjgCQ7e+_@%Zi>)lTc$MJ3m}jWol)}Z}_*u+w z;)l=g*jJi7tdwu!H_)dpNn`izF5bu%vAr!Yb!6m;^*$jK9jq(P7e9oncVjgtQof@2 z`k(0^wXy0oWf`ql?b~Z>nv{aFICCq4eN+SvM6GU#s-(rUChCO~3lYs&`AP{VjFO+O z`V8F1R}BU}mFpDLgd6r=oD~=gW-IrbeviZE+WgeNFwsdUWrIg7A3PH{pd<_oZ~w+G zt4*m8UW5g3=wUYw>3H~u2qfY2JmH58Ia1b_YY2AQ&}0bVn}Q2eeBzR z%Yy#}s%WVI>yu3cxpJeTMdc^19PCT3AgW`vii_fXiqf$O+lzr1ip}rU#l3rTGIcf$ z*WG7VhSFThwEuN>Rg3p@5Wl7JF->dzV7)(xW>$YaS)t@_Jbiw%;L|S$!&2EvQ`7ag zod8uQ9v-jStDLRM5*AHcuXHsjV&hYOR~rsesq=jP58bjPvbhYKWgNPnoG!D}Ipdch zn@@ft&>ysHE|zDAa0YMGST4y8ocNh=W2KD+2cd_V(1LXPE0oF)52Ti{V3A6qbv%Yl zven$4WV(3V6g^GvN6*o#)*x7)_jt13Eh~t=J<{xrFkn5txknCpMtRK1ezug$j3*xxAVE=aQ)T1-P(*jHYmG9jbPU#drdJx@g$JY7hb{&TE1|8hD1GVMY4 zF_<6d+k4}XgF+v6YD&<_KYAKZ%{h_YKU|$``y9LL(ViAId{{zQ>H6bL;A_IQ8*qDO zbEzY<+LU@7i$gyf&vt_0;jeRwSnNZ5@}A*$)5ncoN3|I-VFBpXTA!E0#<;nVS~{-` zGohOyW_tnB4|re9bg49ZX}AJe737(v`r72m$mfA9E=3KzBc(Y9#d-#D%! 
z$%o?^UQye6_3GO@=T2m z?KgZ>g&y(^|F)sjmBTR4`quT|&Ru(po(-SboW(=i_@7o!oaQCjwZk{xWj+!^4%eF; zn(G=fMOy3$e#i6>x^2lQmx)ximoM_QONmxV1Yk=4x8~@tw$5jh@nOyL>}{$8UVxC$9;~# zm76e-xG4e5xnRXXCZLkWX?`%C4$SVRYJ$UnPW8SnZD{Z4p#yyO)kh0dU4B5HJ;+A4 zf|LmGG5}lpXnrea2^w>Na_b}WTCW2Z{OW5CCZ;1oX%V0U(hK7WSRbjANH=`EghdNtb zf6fu$Huu1n$WE@W_3;`b(5Hcx&i$xVU-O*`mpS7bJ?sQoTY4_nmSQX(Y@H@@em(ZG z5ejyXKz5f7lf199ycF{2jKhhKa>|3&Nhbw1=Zy7_*TQg!4M8Co=hs%0jraU$w`7xBw%{QXWM3B$QAlu4~ckAjR@ng>=bE5*B{9NbRybXV)ZwC z7_r{wgl}>B`jg3L{nClj<^TN4*0Hr9LJgewQ%=WGP40R$Jd5I6?1Q-JD7^t_#lO9j`=K&N535-w(d83)CgdXa zYl=vE`1epv`)y3BqMp{&UcB>EqBrSUOYxgUma8129lLn4FXUofYFFwMWQ4-PG3G&I zWyA4vU0m!uYeAu=Z^_RYRCIUDF;{=~m@3{!2jRMA& zy~T2dx9aMyz%I9VXg^kuNYz%|jwPpDw$biIfQ#GExa?6Rqet&1$0e<(rG;ZLesa^+ zmtOH-8*}L6uhR&hD6Zy>CWn5`b(Lz(BW8clx4R(34!jSIlcZGF(n`$9ITMU=Sj#AG zeJHNr2SKt$#l>yhxhxAvRE$Mc%q7()DORl*-+-9i5jI&*<< zG`u0$p#S~DKr)R&EcwQWi6d*KZN}*;&@JOIC4&Oq0Z8HicDx-(MVsVc*1%94a5#kl zSRGXS^*F{(ATH|Y=qOg~Z<>ARESB*Sa9RoiSwiA0^;)$jQ0D*%!~;NTf(VTQ+3yg_ zP{7GE4Y=Qbh3hatI~r+fYOeaXxCg-F03&kiL| zQYj$dds4}`{=(Ld7Q!c&G?a;jCCxwP&4sCiTND$(?`NgNAfsu>ZCEQ@))6-!WhWdaW^FD!y%g{Mv0WMTL}CxIjE& zJpe|0WqDholS&pJ_Jg$j zt&4yDF{UJS>pBmV*D~`8OG5Z*3G1>y%jX0Pu#Sp~EA?OHi zhpOnZTumc%nP8EV(ciaMI{Spx?sbateC|5d{Fl`9!uy$=B`$0kE0$BMe<5Qt=rh@j zdce-zd;vmK_M4Zkz|)Ly=O{DP*l#rE>9PXQ+!3YyV(4j>P;V?N;8$T!-^rWw#v744 zM*K;@^j==<UM=xi+R?=rnxsz+$09Wu#h@nXH+fq$dj94Ie7TBvk*D(4vF z0z~)Gj9Z5GPSOz+|77~9VLb=pi#rLor=*I4E77#c#%x7fKA1mD>O}Wa88|r1yY`^j z7{v2+KWChWLb z3&57gCQ%RxTbiAPZWasJxnO)L__UVCKb;->2viF1*#k~WTUwX8eesmK`Wk9D=A;iX zNZD*xkqJFCa=)ubx-?X9GBx6`nTAVf z#NT|AT68>(6ZbVchtq`9lGVj>L31fwJWRPHr8tcPCL}61*FmxB#!U(Qg9qup zJZ%TDEj97lDRJML#=qhs48AXm=O1%!l85yCg^V|!)*g<&%QrqQe(OYQetSQrWEfZI zWI%dE_`&!-Rlzi4bEZAFYSrsQ`=hni^Di8>+|I-U*H{%5k(6;*)v}WyJDxv_<5Cq$ar8KKqlf@DToj82Fr%W zn-gG`jp1ryWf2ntC-_>Ehp_-Ej74R6xibec4a49&(Ku}BjLgh1e4@yKr-1Hh`LJO| zd3MVoS!ycPq@g{mzHm$>B8&Gt<_ow!75R+ytPF`GfWlx4U=ZZ7ASxXBqzHuTMC;(n zB@_a1OnyT{l3T#9X{sm}M@RO-y9zj8EkBAoaD;-$&Isx!^z;b8eOq2aaGO5?z>^7reQ4F-{0?iW7u@IUNKDUftccv3Im~Z zMzjv68K-*?OLH^N)7(ZSjUb!EmD43lbXA7o2JnBBhmoEDKn7f38ke?s! 
z9Pf`-4pGnl4xAZtjm`S6?02HHCEsaBS}|XAcemKxXTbPkkpC@DfA%q2tZ<8?ai5GM z|JV2Qw2o!?nf^C9oxvd8q0kJWi&Q>QRGiCU%4fH> z%8Jc5Cm47(AtGqvzRsz5$WkS#N@Z>JjDl5tj%7MHt*aIKoRPs?XYmk^4J zfO9aA1wn8Y97z|H*K4WxNwj$#rL^RYs>OV?+EXt|BsN+yj_iY6IbL5`)+`-ccT|PW zM~z1$?!SByo_OKTOzgU%%_Om=oI)9laD6oA`itk+{16-CMCY~LXB}nUD%P6o&xY7+ z^W0JHfq`k-i#20rz9;P2?i`GS@(dt>5qF5mMd2$@5@U7wbe(U$s++Fqn z_6PRIn)It&BWGh14Us9_3A8`<^rB2D9POSq?a`XEZ7xGA+9t# zr@-Obz$<_kX+rq90hyFi7|-17H3@g1BOa_A>b$K-eW4E30MH>2h5N z*CKeILC{X>clvmI{3t{POQmgU8hCfHbA5fCpHGd|%VyaQf&*TY|G>M3#sTvtAdF&3 zznxpJw?ZjO;=)ISmA(55PE2i`iIEYcFzM=(v#o9kKrAIicblvzFZUDQ0ze656cjf% zH=M7qhaHWLjo-d~YiO81zXc3aaK^m~HBN3o#56c20Z&c=F$TDo^>i8=8gL<~{ow=e z>)x?4FbtjFt~GUn3e}ABov!W&eO*!Vty*&#@M0j0wbTd0X-QC?%E^T}M*&j?K<@!A zDphEc8YYlYKH~kXktGxa>diM&o=_pr zVK0tRGls|nK^hkYTeyP~7tJ&sJDu}xx4|BYXpUrk-l%Tdqj!@K*?g6sh-9PRLUC#t z)6f-}IdOH;i_He21iOO9v1s$cpe^|89a8AS@X+L-GKg@uOJ&iZJ$cTOK11k;c?F5iXpk2}{wP1#)=NE_|eJ=bQcFwSLH0Sb~;?2=b2<%(q z>C2PoF`Ib@PJ}gCzrjIsIbxPg2Iax+ba-PM!Dq?zP|!#+hP)du!0drYNpP4@hGA8w(Z!4bu16yT8aP5;0$SFKpR_#yM|a%TFW*HHt$NYLyd6mkJ73{ z3V)i!53&n+uTVSn2I`YkxUC2h5dkMW?k8%wo&yN>viMR}-zUm`*jAHWS4l>ykgrnK zpTDG=g-SvX=8vCn=Y=!X7jPP?J@mSMQiMQklb|4x`~p|J6Ab2XqNEU12y{59A^fo~ z>cd!bwi(MnH+vav0@J^;@xX_<7(cgqY1|@zl<=vgZxqiqh5QKcPb*$df;)?vDA_ z1a!AV_V$}n85q}Qi@ToB(kX7s83nt-q-fqO1~#0-o`&NG_&r~EHug3B9WvDn*b9ud zH#7~T6}FhwdjD~Kz<(>yZ47B}#k0;>(c4{eXuRe%T<%xni&4f#*a5+fj;MgcQ06LSq<2efiF)K_TtNK%!UDOcde;Qr$)bd zj7?|v#}nVugl9DEyzA=w9jjZ{0GZ<(iI!1OdFuUwGbS;rO8==1Rj|MOhYmV@!Up~l z!QHNdk@~n~6Ryf(4}=hU1qvQ@?C$TH(<`hAw0jxp>8&6s3$%&R6cXfA?h|}Vpk@&{ zYw16{0`d`1aNW-+P)tlt=70hKRBNx|+|^Yda9IZs!Gl)s87M$>E8o2a8=#W#{|9i7 zK%8~b8-nnPN(J&@z_7gmDw5Yx`1kMM1)MhhK&1w{aBXevbr9eKuo_!H(i;diSzzh5 zWbAbVSdF}%;y@8 zz-Stu(?nMni0)(ILV6=2kZYD6!EX8=(2d-Kf4c&nA3%u%N~?RY%Etd(0;XqVqTZMm zqM*!r-Kc}_0U-$3y8zXsF)#0xb^y9jqk78`kOHCjKMf8Dc?2@H!B`3jut5Vtn04>H zXaJ}`UaHvwSOzZ87)yaViJ14-0D}r?X}B#*FY9P z@Zz!q!bTlQJRxj2bQWv0_UyUSsi72Z*bs4=PeQ;erSv^p%5suIH><1ajkM}a||J-ZcMT9vPcU27lrx6 zX4D5QP)sm_@EWn1Zfmd*m1>)G9!e3(t2zBTQ4X<6gDo%J*Ia|1w%leGE-w|NNKtOON|=?4i}-4 zGleN6f=kwqe7ptuZPRW52^2%na|*o5@^Wg;6nl7Pd4Ai`}rT zFs~4Wl27yZa&^_CC{Y%SM^6G2lp?Z1ct27u{fkO#w<#v1n(fB(veC`7fnm49ol;5Y zW#Zshvp6p#=24M{dY>51eerj~;Y}lC}`m2NQcy}K-k<|@e?2E$R-E4Nnb?$kTauX?( z`k#@%tAQus5RB*c7Z_KsbNdSL%w9E%2?%Wm)xTwV_nCsG3Ua1d9|Yk)X_%f2Q^ zyaH#z)XNvpzG8{_%Bwz)1A7|q$pF#x2kBuTtOon-4>S z@auY&aRb6HnDK*Ke>ZEb$=Jw96pY|X06YjI@Gj&+6*x(Mysrb$(tp3LC0H04@4yXi ztDo`<(PDjiI?r}BPbvgg4hXz+Kqz79exFbvz;|C>U4e{19aGaSaLI`eP`dz498kM( zwEd!Xsg{KwIXE}~!G)kW!m>MUbF{G713DT4^e-xNsIbt$cYzKI4UQ1{&{t_-0<8m( z^#MV801+X}8;1!RB>W5U@4_PBA1%J*(4l?W?Mg zkIEA8OTn)sL&&W-oO)___zsu_7r=Q6A_0K%w*w^ZwzS-VUnMZ>Ng@K}52%7cit0`E z$ty=3kPF(XBe?~)c9PR~y#s?>_PB{e(xr^=o5*K`T@#zxcxXyD0z)oI z6BIa=Vczs5?Rj`vbe#8P!VoQYJi#l>7$X!Y526FvqBO{E54O^RcwwXbg6^B5>b`Jz zlSnw$3x%Fs$(1U_I(M8f)FkwTGv%hDM)i#!joPdk`lt@_M0}I zOpzbk(akyJ!+c_cs0KMdNYz}>?T2WNW5UV9R zT0;#oiKI5q&*|68)iIQ4t%xgcp)w7jq~|h;<7BZK;7ij}^FGFWfPFh=ktu$mq3ER) z{;AKwXLH!Z`qW~DPI;PLZ4@3M0&1yU%b@+lmWold@TCz= ze;*vGc-{@B+QRYvVY`{456O*;W@fw8J=+0Sk;kUtWpc#1Cf=sq+y2kNr*`p>X+FlS zC5}7E7hdtsi+`6%IX5-R^&vsixJ;9X)oTIJvOzfe;e`!*on1fc?bZ0;=Dg$_JjRro zxe`jm1|n5?`uu-~`IGzk*eM*e_a$8Pp-D*8;Uhsh5!?-0b`HbncJ{W~9RId97Y@yB$HZyRKP$mi3lM$M@B+$W~M0_tvjDdKx2hLF3LFO zDkBqe(ngI{UTjNkonL(d*}i?ey1IH5u)mfY|Bt4#49c=?yY@*6h;)lINH<7HcS(15 z3(_6Z4bm+Q5(3gGT@upW-6bOR9rydq<3C1pWSnb1_E>9MK!UT@V6O|}H(2`E%Xf+(z23jA$A3!eP_ z`*J|&X|k4aq;?TS9ZpzTIYC+s8H(NFiJe+pEI<19!S53L0t*WM3qlqP52i})eFT*a zbrY$2SOFJQRaKQeohtaRPlpxfA&;V*TvYfv*tc%-Mt z_C9M*Qb`D!aw48{RhRxkfYZ~=O)`c>!@M#GyXLcJb9Il?FUX`0=B*f!%e 
zF5MKVnJ^uUJRKL?g`qP?J-Sy^$mC@ASVKHgTB+~VKocU0HSgre^ShmX z5YqT9b141=6LC1exC>$qKt+Yy`R;9~?+{9?sQTs*D?0qhNZU=`(wq9=3u2yhmMNqp z!tD{=`>Og~X?gj1Ds4GRKuz#ZNns>M2>J6y5EaaeIRs%GQ%!Ps7$rok7t%C8_tbHc zoupf*OIZavpP3pt)INXZTQ5B{)V1u2u{IWKb1@v2}T6cgB?Pc`}783yCBe;;PU~r z7XV>&YemwS_*LWMz~aF{@Zk`znzM9m9%nT{=_^Q>j;tTKw(CzTflrIFA^wH8kG0>I z4E8(nFghwrEJ>@IX2d%f9Tjs)pqje+^^tnG>$ z0vl1spFs$9mTTPJZea+*e(Oo-%swa0GbOF~MbTM|9HV9Xs%uOsC!1eFgll7$6V`Y> z&R?hiZ-c|mWc=YnzLKB!0@a5u(`tj4(2?LGL!t@ERJZ@C;6I^{q`rq+ygB^Od)?Ks z7!YEv>hA`D)6I@8=2P$Ak_P|wQyL`yp))1&OCzzm?tU-h2&=!jnTd@hb(4B~0{_A_ z^5@*Gz@@@VtSrRZZ_UJ1HpWJLun`R5kQ6H8D`!VKR^%*rr8z16WSF0MWQwq#UUGaF zAt**hrpA0L$l{2W7VxqQa!MG%m7hYjSp@UPTBU9yNCv>e!!vOFi2&BCV6d^Wvm@rV zOB>%SNTT^7Bcc3%3I~chV+V)R|1Sy95tRmkSs?04VG2GVB?XVXiac2G?>Za^_FY;9 z`aNxJZBC*a4*{^R0oTuWs0Cna9%}3%K%$Jw(}g2Zzl5wC^7l8|*VISP34M)}HXt!la@JChKiAl}-4G z(f+gE{AW-ILU;*Q=GaK^zo(}DOCSJU02-K#009v^^+2EuVoq9VMJPF?lwoz}Zh#ya zm^$0a%MZ^$C=b|rL+e0B^)jpnctwG~$MCz62q;qFlg!M_tIf`qpgkhP_-+rjO_A(t zk@>9s;OG4u6a=dGxjZWK-P+Av#B(Cspxq>8!?W>6rRnRHiz>3Q&& zI(7b1huKSO$$)k|O`N`6T2W~vYj#&Isqz^l<8=uX(J*u|K?H;C7QWd2CKJqD#4NZi zoQmM2gP2}ASKO285fINzoW*x%Qk}o{v~K(Dc5bZY=3k8^!$y;Jsch1x!gwz4XiGyq zuhNcw!#OQ{B#wT`+as6zK1)k?8UF{C=f6D}i;Y$9tJ6LFF7~5PGYq{S+(S@klcapv zX4E;017ES_{tU2|SHWY56g z-Qo7*lhnp|37X$2^xSaBQ5;l1j)ae671f^;cxfnvu{)N{;eMKBs!_Z>h&`n&P{D{T zVQ)yUf>T!TBQeXlmv@iVL@zFPnfo$2<}$Y{8umRwE#Rb<< zbShZ{dzV9(n;~6nN-5`fk^#fZ(-``>>D-@IN@CY6W#?fS>b_d9!TDSH`dZ(5uv6&D z%)ppf63ftvd`xVWbi18sa5u{et&wtru^Tf-WUqt4KnNMk^|_Ss_4NfQbaJw?!~+qM z1%PfdJT!E$)OZFKx**^31q43)z_cn$^Cdz&zqwf($E&KWZ0Of7Ty@@~!D?pw?YD$VpG+5EL8+;_g_%Gf^ zeu|jYNSKG%!P;88(cujQVlDYd;=)A9$L_!=0UUg|5@i(?%XOA<|Hqv=_iFDRjKEl| zHILF_W@Nm+^8t%&u+c5Hc35tz>FDSH**ne6%|DG;N#aQPTxhW({)^=S@?&RWOF-fQ zdIV4y4XWKi;R2SV$o%Qq*~1oQlpAnaV`E|Y2#$jh;wHf3nn)Ex%=?@3*Q?juP!~Xl zP!2DF40s|_0WjFa4b*{f6A)KI7ObPA)4g#Hn6-UFs3D?Y(Za>W1^xwK-*pON6hN~h zDT%n}v(+Dyp8>>Hw(J}n&YwO7|Eeo0qGn z&cxyE4b)AM;`b~Z8**H?!jner# zsS(-|g_-0qH2lJ%Mm)C-znrcLu$V0lLd}!3uS2}|;ihv`P(orwzJ@1*Ek<*NL{O|z z$%b`cL-9HLsF@b=rIbG4=J6~>?&nzLg@38Y`)NX?@ur+g;e3YkyHP=+>p@%N&?5Sp z-)Vv<>`}w1o&8H^$p*_9zk64bh*;x~h7KTn4>LIo zces1E{AnfgBa5fg!=vqw8DWUnomNU^ z7`5Qh$bQM~O=iRW?E7ba&;4fo@p)m@2O}Pzp6IUEQAE6@yv;R7#a!lA8Mx>H3Ql=T zeeVJaMS}Y;WtJ5AMk!Pzcw;F?0C}*yJkp#zCc@~%jQ!xk zL!5ve3Xf#Sa>mSLl)(~}4i1A0VAYQ_S%?r9RZeP1>EpK7zGNCnq>Aw21)%3?9JH{b z!$aUuS{h3>CZOOUGG&Y%Ji5_Ikw8i?Z?3PO*}EF5&61}9Nxh%hfSDZ-3jO~J>Qqi% zVnx-TlbHB1u(gpTMUn912PYIhumzFjH_#O!k&_EXl1Fe9TW9dNO>u^Bsk9+L-Gy|Q zrjdg#PJ^zluAqQ~8e@p!NMN?U#X{CjSsE*`Ukb*MN!+|>DlZ3zC}M}VkD`%haCDwL z5|KDUYI+lDSS6?7f#%rb{N5Xp^gXPoU)V#OQH9s<u}jzg9VbAX3$)=mAz#$v4i?7G}MC?hekk3K<*!$Lu30FzJ+=?YtE^2PtRCv z?`9d&heTc)`rV7Qs;J2Pz0efQa61^R|CKen8U5RiG!;UiipES`HEeGGIz;&HQp$bw zL75~F3sH#Adl2sG$(U2KPj}Nm;Ul?&T&fXvEc5auFEaHD{H(_NtC`#5*%#NcYxQ?V zZDS|P%|C9NCUTfsh1SkP#s^9DCS6Ad1x#dfPhzUITbs+u((g?oS*A-gJB`UtZsO(+ ze%4aD?Ux}i_`P@8+zTVJ)y*t=LdbS8e~yLwXs6#fqZXAF9r=)x8jC_HvGl#F-!W+t zZkW@Nnu;Hu6?T+xm$|Dmn)vQ!-dkps(Ax6uZw1@gOm0H4N}=>cPp>Pj3z-xO24uS2 zuo5euw8oSv_@2O8fkOBp*puV{8Ozp9UN%bp;wBzYX$guJco#f{GT;g6n`FpP?Fh{X z2?CB_G2kaEKls<58I@MU#RRUPxV}nwi^mgzJpn6MUxUTzgo-hv@fP^4?lT@WsY=b8XPT=ljHfCB=6g7p6s= z!*XfQ10F?M9ISzS>qXG_+t}EEH37I^+b@AG!igBn?;|QW2;@|XA#+E#K^5UzB}x4z zHA=K*OAQk?As+P8qh{<2#TJOLQqIm%V%Ra^a4#UtKg=>J4h~i3+F&oplS8yNV5}Nue=eeS3%ujriB30uL>8F4Xy~ zYN|)-{uFZm%|_7AQ+*_Xp80M~yQ3fn*ZpKhB4Pc@$&1;_=L(ZePj8~+xs=n~v_15c zB-SqCb;p}0X*t!kK>V9CY&D)6@}%62`U0 z{<&%rETM4=HFG8&o$}|K&1>ZwP6Hd~TN5Mg6kX*{zsh@V-8&X?<;-}u3UFK3Hra5f zDlK|`q6-4+q($hvyTR6|j|AKs`GM^Qx-AD!b18pFJ=HCY*LoO)e7zsTWLcZ4Tb_!N zs|J>FO)3`ZgW5zMjc$!`-f!D8eh`_{;R 
zIcp&dqZ?WpD}~$FO+-4+&tuy?6kjIo0ryH z{);fBB=vvlyys}Cwst+#>L|JD9v9qODyL`oChcWWN?0j4F;TK9tAl$t^ZA*qT)_HL zA~{A^!|(51)RpG5>uz+a;RA%LJ|BN`_}l#1_q4aj*8SMFl=y*^vhkSb{IE@^b8TK} z&KsUL%OiIY0g)j+vF>=~B#iN>_}e&5Bu(H^_-k02?LmLb(=&WVF*(`J_(5Tg(+ ztrU?h(^}7ZxuZD6IayX3M)?QJQq%*LS_W%JPfZ;G3XZq>ZqEUIXOyE#!dZH`qI6Ad z0BGChmX=Y`U-G@0<_%~oquFZ6Vin1v)4wVqLE?f^H8BK;K6I7CwGT7x9Xby>b}Uc0 zyAus7eR573$mF7_cD7e(c)-ZPba^jxNW%BuYpc{W-1Zb7S;$)2N05`|`{%~gxy9pl zfCDVO*XUMo7wlz8-zoi*89W z<}${)PVY$A1fNBwn3%X}bZjKw^i5T)VUxtd_^?H~A)e?3Q|CpGp~38trmDBO>^ffx zm1!4}+ACqi_+$?DA~VmQmw~w3%c(ATY@6Kux^H+_Egcuf{n<``fBRQ-Cs4_wc5r}~ zgcj_{{)PMa7ME)bF9EjgmSN~M@BLm`to;PWrpFaA7(Im9Uwqx9mp2=Y>*>+H0j zPAIu%DE-vDu0D?YS}pD~yv;Qa6L1!MoIg7IIKfG`-f1@&@EG^}_k$+d3EnrB5xTX1 z*G#&(E#{K9$ND++)60L%9j=EhY#GIMYk#ZZvuUrM zm`bUYq{yysI0V!Ze;Zm~L6mg=_cNC06R@UM4FB4Y|DfXUL$>m7kKwG1uViT4i?nL( zeEl=mt)5WR#FUzU-tPeEisH3xgSj=Y)gLdrJDZve&+_w<(ic>s@jD+Gs5=DXPFF7X z?aANd8~lLXxih)B5{i4j6Z?4g>fPDjGwY9Q&SRbnNAJ>-U7_TLLt0(}m>nnh8v{gm z$T(B3hQm?WH?Fx!Nyu-f?PSv6BZ;+U6nsF1CYs@;NKg46n>nfy_x?9R7P)k%Yd9kQ zWdcuoxho$dap)AIjvS7@x5=`#MFRvD7O==u}d6xQEaoy*b+98znPn2KGbpTdXus$4Um%2ZY=0v%iNq;U z{NI+0m=)IV`cIHp2EIbG2Ng1|;PqZ3jvd=3O}q^cj#Hu6=i(FMdM-!JuZDQ@aKTS! zBoYtW0Nq!t=!o;!`Gi4CdcNUG2S*4o9@k*}@!0v-|Bq2_f zJDgOL)DQmpE6CA84I{!C6}2>mH6`IKZ8n$5!*6w6zsL2F;w>~{?tOR7IN#mM8)H`G zmoxgzrc7qdUW|!jboE;YitVF(a$WJZ^zaWTlhhzGn%LW1zwW2~&f#U{s8hB`TgQ)l z6Vefb>x-Xn*B&W<(MGXx#j8#qVljK87SLy9Qy#4TaKsx>;o+CM_zOeE#PMrIU0q!h z_hhJfme0)2(4X5l{Lk^0VSU^1*K%?xESFq`+Jr0)GqbTjm7a}jk1o!pTq8|g>{E;! zzpDT05dCU%Zdua%Q*x)9;kMsmp!7Rk&Wn}l%_LFgw=))R(ji{woK=_Yi)7Q%>FM6+ z&Fd2>olL^y-^p#4@n36y1liFX;e1VL>I*k>f{m2=BZo?6yx8z7{4E(>+Gi#>a^UwQ zo58NnA~1T1?~OC$YP^E4tl^&b;cOHBl~Lhq+HA1^C-j|QSRRacb8eSw@rm~L_vl6} zr?2@gmz81<#ccL|*S>#&MvmO_hlGn0-N4gr!4$poSZVqO069c`8$XleQhB+4@FoqSmjjd92237(hnCy8mYcu_K|<` z-Qzs}?##{X@y*H01p78^YvM@0*EEgTI|?QEMM*z#z;-M+w(|2^=`EJa0T>AOIu|>~ z=g-RPCuGX|3`YI$&CR@laYU4p$Ug?F2ezsWoGo6|7OJoZs!Vn#F zQdd`@t`5-_ML=_Fc|%!?R{S!7V9Gpo>C?3ICyN%eIWB5q#D2`DVf$NUjIHG#@(NQ} z5#o$^Bcr1@e0fU9FzW>V`52c}GB#FLX+r)_;h2;_;zNza5@{EQME+I6VWGC~=mtmt z#CWK|Xp4e^65NJlsHn`^A3USX=s2b@)k^@7Yp$ou098~ycf=D5H0BnI7UVc1q6s)- zvY28kG_okxGpwlOhAOg4k$O@4)(Pp#9s&}B+)(&v^4hLc*5+Dh5OvEZ%~Sy#(@6ndhPlBoSPlA4{`^V#T__dk!Xvmw)PD})1jig zawtAERZ4$xdD*9s?X(Ha6qOR(=karVcNabg-fj2HxPEv%Xx7rraw?J5pZi1{-IPyD0oPb{Btr>G zZ$51zqf^L0x%mII0AuYWaj&?|Ik6(3a9!2BX;68|)VvzDQG1BuF#j$ua)wurN^laJ1S>cd~RS_|(_g?8^Dys1l!m-Rgcli+uUE#bMxkjB4u$ovjc3HE)u| zrwJ~FhKl(T(zlHctU~(c?azK&k#Vq*hl8aCPWfHoAMBT}BzA_AT~_$KcZRnk9b$(a@5lDmmy-x>- z3EC)*?lTdUvGZnK0Eu8#M_E@O$nqtd8f~=b1dhgkRuD&UeNrOt^MaJrXI%A-PvD_R zS56}R8WRay+i!#Cv=39|ZUc{vrK`mt@^|Jb3vcklCrxpqKOstbrxr_PJ=H+QVcJBQ-s}ZebCcm{Ly8vg1fKS!+CAQsf&+;bw9}iyg_sy^1Dhq8DOaAvcvEv}G?UpUP?>FjQzVd*sw`3_AyKhHoB>07mXi#J7@r}Jj9~fQ#}!HY zgUA_P{ohe9ugc(T1(({rda9TMAB+@5T*Q)(=MiNG@-RbL-*yE!shhugh5VLD*S0Fg zoBExxjyPiKXH#nGZp^TH{swLd8%smyC+fYFV93G$@2lt|0m1LQO%;;l*JSXbepbTd z<=BaA3L@`EHW$_9n7;x8I;Y<%p8_I z5GkBZYqY8j-p3j~_u=#V3a-8T_&i-!y1zC*?>Gvq^KNIW-o-5h{BEPb_p=DZuNt{! 
zDIZdo{Jeik$+#ABI;jGsEBK}oDII+uVXh;Z+z0dqMMH}=j^BNIpBhM26ZBhihQ^)``wGs*nUw= z$Wh-NV4mUx|5+Iue;qvBZ*dS4>j6$FE`?0Mm}4cdv$FDde!Kf#rc-PUEMw7ZgAafi%Gpi_16kmXNX$GNHaiWq-m zoO2;j(N=n&uhG~ z&-{K^^nJ6HuC92OTAf0nUep{CA50ciIHi-GT#BTIcUy-f!)eo-99WuF#LE>q!?aTE zW$*nQ+k^gm8nW4T9{K78f7n!sLN?Z)X0QCo7oJ$Jnvt7tzSp@)wD(r_MZNn@!?3<@ z9XXK6DuAX6l`SI^+_UzV>&d6R4)PGRz2@2YMiW<7&gb$>8O0!pv-#%5yN?TcJPugx zJV|81LlG9$$w&d%bz&szlCNqCs$L_NsnzP9cHVY8XK>Yh$(3{^sjPMi!)Tyddw>#Ti#2#JET`{rs}WMb?_>KvTm z5bfYaIv2eME)>)yrmNmMG$8zzSEbNt@87Puh2-V=nEuhNcf{HmhP0?mY1EX_n&SXz zZu1pUzqPa`=I?==mD`?igXgI$U52AA+qsVm{`-Tyk##-Kw}8(2Jb?sI0R&a2-^#2OMRIkm5RZ9WL}`)T;+h&AwEh=hUQ z%Kec-0)bGpn|x4doZ#EJGri<)6h{ZmYcog@hyBv-tMx<^a_d7ph7yZp$b{QbklxaUDcUg-ne((EnmUEYB zoebH(*WKepm5twi-7noE#qK3iJT@C2SRWF`+kta>o!3o??QDhh?F!Ilbaw{<+!Qwx zll`CLCB;P@0TTb+^paeF)7N5LJVlI&c@+|Z0F-0z&)RAlSNZPYML!rD|I{@6j|+FT z`v?O8o4PL$^8&9?u2`PsIB-rH2mwdqSFv$Er_Tfbo$i~R_lWC0ZS^Y<5}pt&XjDI4 z19tM`Rqpe$gKDvLuVpQvr1@KG4}j{JuhLX&2^N<1w1A<<@?irE@$>M44qcA-?R&S{?E<=Bu0-MI^3@; zC*^V3J_E`KD6Nz`4|O$D4mC)?_KP{Dp(($;=1EC-a-?sKkV1HW%sZ9(BKf zpY_BZu<p56VG`JZ+Ol;8J2jRVXe%RpHSO12xQ+~a9%J+3Q2 zR`jre?hgv>FOXg7e7BRj26TQL7NgQ~a&?T~0HVO4`gZ7TuFwA0Fj+admnbM8X^^e| zKR7nCa_@sVxq?5I_qh zlc)hy)oN-0r zm`DC|uzU?80SEtCpvz(@U-OwG^^WwHQJGT!QyCEXK$5k^tdczR(L&DE7~y+AGtX3)^{O7sJz^*MK#ttGp7aqqiS1E;Yt=CFAl5{T z7rF3)LU(sebAHzc4Z|x(<`TTm%N_XgBnQ$|0SeE&cSF0O8`xo1+4qga3)$7W&CS1) zOfS4I}y%Lljmzl|}&rp$LgWLPFxw(ByJ>%|0n^j}+~mo=`TGO=&HF8Ucv{_f+FXWRN{Z0NyQ z!>bm{8*7pRZ@y_)o%`LHm^sE4ehoAC>jO6u;(A-26Rhi_RlfI=p>2K0Yz++xQ(B)g z@nSe=X@V(}jP++K1=24@-^+C_v^XtQQ?nO~M~>T16xmypWVy(GHIVGies7|hSZNSt zeqNbfp;eF3E}^kvEUKbcf$gG0Im_MTe&6=*LL5$hUt!^9OFYBs^J*g}Uua0asubEO zleBZ_)qT!Ik(@@5$<+6w*@NS>&#nB zrE}4I`M3m1#Ljdby4#?Peq+e!I!JgtVDd;gmh=17-$%uWGxs1_Txa5yuBY9c(#v`n zGFhh4Jn!11wN^>a_a!^M@*@$LyKj`g9UKegVskLHAriG64X1QwO;2CQW;UU;`Yyd~ z9_!wc62Qf2{c9V0wU9NBVriR9+aQZ`Uo30ifr6@xDomQL%mkAFs*CL$Gd=$nuy)}? 
z8r24MR+IUI@9Szaz9aR)vecRm{CUIQ;D7b2c4tIW5^w;u8tm@ARR@)qe}1us+q{>> z`|(%$)vWp}@+|I?1@M=!WaI$-68HrQ=@w`*wUWMkD@TDESKO95e7)(9eNG4kiccOI zXW<)Jx$2dzTB54(pplQ)Rl?QLdL*gLwr zo%Or)LkYE-mD9Bh)X`PC>WZ<9wDkTszb=1&v}3}8zLWK#5jmRm^z&3(xF1Oi=g}oi zQfvolVaE4k=UC$`P*0f2XlCPyFf-Kf!n@ZwE@_@ammzwW;hZa$AG;%z-J!K;F43HLB^&riUvwhZ$PU8($JX-?JK|mAlUlv5!j#X zv8e~fV1E!*jEsz2R8#~6SY@gupv(dZmt-PO(KX;$5d&G}iUTRA>{B0v9)TM4zs&2i zReg+L@V_)JP4W&!mfLD#9RRC`fBgbVD{ib&bM4r;xOcLWSfaE=H-|Go+@&ioFAwZ= zVC_m6-pjV;KvhM^Y`Pw+DyIFv`>W*5ksUnE(>Dt;V>cu z1QuNv5aepgbi1-pWjUu&-QI5Xt3wnGTU{N~axa|hzKxhB7++0dQtcJ?C}ut!t1R*c z8z7HmtHd_`^AXgaL#9bI+dwku!hn-?Ezl^CnyF~f{bObNvGwj}Yi%|~I3xf$rootW zVIm4+KoSJ{J_8+T#Oez8 zIBk|Gs0^6Y%0KKH39uXOq5L?WdGzCOz6%f|3$3hdQeHF4bWr5|t8C`F|Ivos)*=g0 zQLuieNMmgYLm^W_l!oh{r}i# zdR?~zs8}?&!re+k7>li_kxTDo80{B=S~tkAI6Ifx$y{W9a2&a zHdZOI^Xw2uvA?VTDT@;NyWpYcch)b^&x_g|n(tioQE-Xm6Il|!(9g_rI9U5jl`EZ; z3Z1=m52Z1v+Vmv@`Krq50B6n~)r=|)EAD}8;;-)H>~-NV3ipZ`WtxKGclDo(6$r*T zpkx)34d!$;SW$?;^xI5}_pL*ZO6V3(>WF#c(i^m}1QdIZX{4a|_;|ntY}QMp!o|ZQ zGACv0cRiZ52hv&~00kPgg7R`I86Yqp9312Und(543e#m#2Xv-bruNAk;@DkIdI%eAk4TKcwFMqn3@0?e4QEGm+v zl{M^F1+4WL%h1)SO67b5jA~*e-x*Y?jx`2GY4#V}Lf^CKWdgAeg)Z|P^Y*$a~wMC!C$eu@U} zYoJldh^4_IzAiCEMCCyce}!a@T8dcy0hT&tO2wWtR+Ly67AkfyxZ+96vY0fn9~|x) z(!w$hXmcwrB#UQ-24St71b8r@L8w-oj4)7LJ)V)@g$&1ojC(9&G!VX4yd8F(2y&Bv z8r5N?DT1)Y6>;q#Co;ZL+@UdX=(SiLrIC&T>JU7nI^ps%1Re!zmRia6+@=j3dA&YB z4#M|PBOZWfz!VYFrl%sesDqpCHL}F9j?8A`56EWiz2+INP{UMa4HNR_Db!(u_TcpqA1i{OtKh8&Mb`-& z%i)>c9&^npb@xj*xZ1UEgL<~KC{sz7@k zMR^Znaz?}JaQX~(*eRWU!E}A6LSxo%|ANo&kYy&re|XejeH5w=jVth}D63P^DGco0 zu{a#j_mAtdOg6Js)muoI*8&x455#qXF9H65%VC*-dC>*{`oC;7{r&4vGDR0jVR{Va-2WhcATcU4 znL!g)7@!KC&z_&oP8!z)Juj)qso27?T>bp zR>lE_Vn7&&(@p`!bjAkYV9r25IvBAp0p8&~Nb2y*bpY2;7rs}WZ}jIpbDjZ!0ssyy z>lSRLODDl!0#ZcJffZ1<)32RMCNzZ}sT)|D{@V`_aoH3W7lX5_IUq}jN2rvB}E~MfHC?oUpAO0}w(#%m>xcpxyi0f8{R-inC$vo}SmM6>FtE~PrSk-6s z#o58^H_cnqfw)SY`=jdn*yMm+Cc(}4tc>F#RGwhoQF0PhB`$@c0G?n>(P>q2-e^ep z7J1ro4~8xIPLIv4Oyskd>rtWW$F)3}@)(MET3mY#i3=21cPDCL;AM!nNr{So4kJQ& zyUS}NOlX=%ISMaFY@Wv|8Lg-$l|~j!yF)-0gfY&X^y4>-X&%I9RIb!t8j6WYTg=x_ z05|@b8Ey(DK2`z|iQ^3=N&qG+JW3I)1uCs+Ml6zr+Kb=-7!#Ck0?a6gjAOzzlohsz z7amibl^buojDQuEgrI=FAS;fbZ-5XzfJIHhEIsyrlGRuoi$ zXf}C9N46@2&!Tj3tj0+cP;iYRGHfzADiV2UfK31!9AW^b5@ZxjVSw<4qib-!7blpO z3?&#M>yMXbf$`tDHs4{sJ+UvbiaIlgwmKo3miBT$ zx!98Cnt0x%&oYx`O)Q-2eyUORw`rLB<6fe|2f_~}tGA@jc5tDMUbo2)0ZrWjZD&@j zq3DEjPeRWI*u4DrJ0%_GXOHbW*pe>3?KbAjnoKJjxh2*kKa6i1bDw8*o@YTNnjH>r zcXh0~aG8I6VQ+dKRDMwQ;K1RmRO1=7S1F?N(mpW(87yL>Wuy6m@|u;mtWq z)V%E=mw3Ky_V^dvCkl^A^ycRD=ssNaN^9r$l>B>^;d(Bexo@gLk%U%57v_2y*S!Uz znF8zKsLLUx2oebEKR;?C2Vkn;6afen;HK`78%BqRFFgc+ zxB-N5fp-LSIzN_9`r|bXeP@&qk;wsqp&hViz(FkAeYN;cS;qgg0EGDXdpBPHMW9^W zQOUqUx#RCXZA(Xk1}R6-w`J}qnMy_(CvL#BaS3=5*)o6af^Qzrt+vkiWpRJo`5c2m z#A$^AIHrD&H=6xveJ0J|J!9*yWsJ(-zwl^rJqpDj;*1NgKs2MDvyWVREVVh4W+{`V#C2GK;j%l$-ZIti=2!9KThG{m zV6ikMarJCvVG0)VZfmtU*ZxvTHCa09X-jN6)pk0rt&eB?rM7$j*s#bT252(y(1@BK z;8QT1c(gc+kCXXMDnhDl!c2jjNhh)AMrKNtRRAj)N;q3_YFU04OqdAb1OXI=03l|@ zLNH3u#-=Q-5h6}NP*s3c-+a8gdN5Tm@X7kHt-W+cB^QUGMd+sNMY0lx7hVprQ9~jh zeO*+Jq*^+5kRgnTfk_}CNIK6u_7};N=o}v7OQ0uAc$Hi*3?{3HLo_P{$7sBcu^H$n z3}Ss*`@`QHAz>gG3A4hz;t*AX=YtW$&=`lS4IroPcIu{v+k!Cm>5b5kUZ}xI5>gWt z38=zcC8V8HcUFSiFH z)4gpAauYbi;h6N6W-JN%N5&(}ZsqMSVyDJyx#s&pb{E^lSWpwSgx(|rEAc1o>g_SZ z?0*+_)Hp+DivRo=Q#n@zVo9}@h?eaw434)lBjJe9QOj|c>(stJKhz&@Jy!WLzkxj9 zKQtnp3<1yHTT)`CHTELU@Gp0%mDK*vMZE=L&zy8#yzFLa#&&k20C576yQT_dupu75 zhrbVZ&*@x8_3JsVhi1frJ~FDSwdQhzJiRhsXux5BgW;R@U4NNsF5m#8!+C)a*lJCU zSUa%9X?08e5#bp4rSqY7g+(f5O+xxMCxG`=#jISlhcri41UBgbSRVyAj~xR9(r6mV 
z1USj~n1hbM$q6@=`js$7Fi1?9KZ%|B=Fdn^Uw(XUv)4TL;>mT>@@pgcgsZHy6`^v1 zx=5SIVYT(f=l8h0Z7LC6Ht0-qXsw++9oZ$9d}e6Gp4brM!gX&DuPv0P{Y?($^U-r- z9NX~+cy3xA4?#)}7>K0l@uYR+zyvvhyXoQSwRvFmOY_uIitE=Ing-?f=2w%W{am1z zBm$my_qDa=wz08!T#4lY>QE0#rtw2uZ?84$3sUP&V`Hx6!8Shog(kX{#}8q#XG%{5 zFzdU5+E48dwKZM28NN0CRxmEJhAA`B5y*3ueqVH8s+Dho*z&s=rEON~_R`;V)HmC; zRO8^Rdra&5GCg1V)XT1J3qkijyQ z|BtUqhLlLvtjA0l^m}kt6JSlL0E?PAb;gEEpn}k}KREPBOGO0>+j+xoptqO6oY++# zA07rmy;GTUH9hNhlu4v5ie}5%W9x6JDk&}|!b}wy|BpBEZgq)E!MSxsv1E!B!<~n? zr3%cs)pNErTAT`XI+ZO|ne~(k<)L&^8EI(`#$UTw;h6lg)ZXxdDKzWQBJ52HzFfc&oM=1|C;mU|mDz=j( z3*~9%)F|*m6F=iIW6pM9(NPH~(*lZ}FSHQ!VboK?yD$)=F>hcRlp--V67pfxF*jMy zQOIn}Qls$j&j|vc^T7gRR8*IK?dUtgE3!hk^Rz9DNf=Be4<%d(F_^5|37&wqi^C{7 zXppUUMEV@l$iWd-vWwhE0ub4zYF<&Is0ojT!i0ZiJM3zLmt?czD;2;Md9jW$h{Pit z5dR`xIG{k7U|(2`48A#GUku|+l7j4eK@6}%Av+@($&8Q@AuExL5R%NY`Q7JvKA-RJ zbN=ah%6U8I{eHdf*L`32bzc{PLqh)X<lv6YUb+Srn0B*7ON_%I?w_o(a#PGXrB!7R&sn<~QcoJ$8;d-8`%X$YP==TfDkLjWiN9XndP+sXmrjpedfTJMsC4tlONm z;~Vd3(j}ZX*EjkpJTKk4Go_cx+A>ZNSbD*lP(^$FCVmBXwDR<8c^@8e!nc`WK z2ssYe<5Ai#iWR zZ(oyQ9Jb3dv5hM%cPLc41TNQ?m(jw`)7A@}13Jv0Lr%ZbNX#gu)uwr<-r->l#S!a^ z0yO0?C~Zn_zfYh43&=VQM>kt^9bs*Nj@zsBUJU^DvUm@Lj?N~dTmSy?QdN2Usr!~k zQ8_DatJ^oTiNG15OXN5M)Cmt?w^>_#;k;g0xd0VzOmaK3bLh$E?EFPZVTXXV9wpnt zZ2FY`1hZu(PpQu3A*s$-w=BN2%h+a@O&h%k`$?@9{B+xPoqZe1wRyZ^#lCaj5;lM5 zC;{^ z;2oEFMs&$>i~sEKQ^Xx|=Bp)@$oU|SZ>lef4r0~Wyn}7XudQMji`1%9$V zV%*Y}iF$g9NE&a(Bz%PVt@udP;uyYJ&|R+fgvXDmRm*^fh{_wjYnp(IC@g11N20#Z zS%r~g)P$&(38IVBSNs=ak?5M58pLzXi#=aZf}jLlB65_KH}k$o=P?8yHMO%B%-_#t zM2VFA5X1-^a~WiHk~}VgHTg1qnAJlRzA=#;Jw7vw+s-H6+`WF)w)VTTyZb(;y{9I# zevP{`AHGrD`06`Fc$%fL{`R2ErH-k2EyF$M+|_zT`S{JxZ&Iw1BWWBmMA(GPJY<#c zFJ=ZX7l);XUq}d(QDCNRh(lq2#K}Y=d=E*Qe%-beX^pW$5aHsabi_aCeMgRqi=&Ld zK}O;yq&LJi8QoshR=x z-0$7C44F8Kg_9yS36&cHg>8U%D1)19PJ~V3Q%yT}g_`KA^-U z?X`FoUVUHL9G_WBnGL!vEML%qR1s0;kw_F>2R2DqHH07X#e~O6C@eHFGqLzPTyb17 z@n0xMf%yD&B6^ZmX7hOz3W4&I;k%yB!t#!unT$khjsPKwPew+Ng-10nOYtx^ zm9i3{kIN#Sm=%E&D;n_2w|AgDQY+^ylg=;y1*@=+ckIP9Z8-tULZP+|LwTfU&7{Q5 z$mHigd7p9W7j$GP%TsXD=Nhh+Z<&O@Z#>UQ>wT3#!S(n1n|FHPq$ztVInQ@LBTDLp zf1t?-kt;17OGMzjuU*&jWQ>~m=2M?FUIuDvYDdHQ(emet(L7=GZVRCUj;YJwS*Yf= zIMcLwz!bXOoiVELeM#p9A~){E&WMwhC?rIPoh7*YJGAcINzhM zmR2JnGBzeJgRbC*tG|yE_w(3GE$5w0&U5t(TUTF+xwx;nN^?zp*gmOx*_%|F*VYhn ztM~4=Dnink!Ykh2P6ymWS8hmZc%0a{Do9;Ez#DorAi0(0Qa}Wj1{0I*M|o{WGm{C@ zU**woN(&wwAd3IX3tHpoBoS(e0+ATi5h*i(J zpB_l5A6)Nw;+(&}p-*FK>qDuK$R+1~I&Iq~?W|b$MP$%1Q=FXPW{ci}rL(0|jgEXx z7MinrUXRup8;Bh>kEbVF)zY4(G3;7vJzi)z{$|5{?~90nsb{)Z1^b76+r*>onPYq@ ztfwx6Nc0K;lhVZ(GeM?nd5oMlMcpnUGIq6}&{O<5JT1GZ*4!?+M!41IGM^~Q>-a1C zliZ4hrf%9)PCX9E;dkz8<}Xew-%?J}k|jg5 zns&Rio4Q`&ir0T&rFrmj>gBpZt`rZyRqt@ttj4;C`fT%Ww}hux>FVA7SQQ4et030i zA?6=Vq#TO)s@I00^6za|S>GgQ zG0&e_g@MD-gf&T9U(Ak=8sc@nWH*HzcofT#V&}GarO;RrWyO37+4((vxOpc0tIbEg zw;n&tVo|bn@4ybne@>A8mK(&&qI$AnqMNf9t!f}-c=qMeUN#T zzgX1O!UdQAyHM#K5DyIDmhMF_wJS@_w6dPxC*Qw6rutRl_)vMh@l|rbFc)R|SKYmf z65y=x!XEqG{uLfAvIuwFVww=&f^Zf6oU{l!N7aMZufjhiK9N*qbvoTne|1MukGAeR z_N$4|n`W|U2Xk+-DqEydPeL#M`K;NZYhp~yTr{9+WE9|5nAZ^zgNtAH+R=Am2Z(zs zxFp-9Yf*)r#0%9^PH6sUtod@&8_|jBQ*FU^Q8GhM)<3`fuF=kbER-#D zF%otSemwZxK7FxMmGNdCi(8yETnFAX>U7nbZ!o&uvkz#K9?K}>XW6?j-3_$&u z7j%wj8N7EkKg8FYmX;%;gFUz4N;g{`qA!FH9wg9ing6y-9$w0QzV~*SBt7)#;P&bM zrbKa{k>%jK;NYzd`P3A4j-jKqY?ZT8!=|;o%rSC1K4fB(Y3ogaX8)fb9~}GUcw`)S zOn)Z$ve&np6*%@cf5+Epo<5_JT>HJxzutNncc(XAlaZ%( z`n}R1KcU?DuhM}`-H=m+fVJ^4dU4wIbj@vRXJ^{(>bHaUKfGh8BU=A@=T9%7wBFHD zD%7Nhaj$;<%!fA~x-e526>~1yV(XNsYAg{&#vqw3`oecppmCkb*Z2}i?+0cBirB4~ zwbI5$aSjeVwDq_f7^Nv$D6^(cRX^tGJ{a3@(9 zT95E*viOj^+;BOj5%C%8#+&?unoHfo){U~@7dm)MBJ+JAkbN{Kod95ErHomp2%#4!jjLCY$(yERM+USCk 
zlve)ZD=MQP+XT?0YrRp92KXw^!k4Uf+X{ex}0b08PkT zQlhUlT2&kAInd-mH2lDE7+O1YGY0itGhkg16|DcNE&fgGMBhXAjmvFySC=^OP=;>0 z6qgWHtcs&_f;*%Lmiwvdo@1jwIdu2q)d~viU8cv6f6UD^tUYE4It(3_ExItYRN2PRv5r@2aKHW-;TXopI8UUhobdaHH6ya@7F()~ zKMW$S7!2Nxfd?VTl>@W3ps}$L7+e6eD1Y+1HRw>FLon__nf})exmx-9I+Zkt%abB zunl^yEhB-zp^V@r$nMG~pdtmJM-xi`(JS*@VubTP5w}lTjnPXO6uGUTA=>Jwt5=*p z1dfZ9m1-2-V7fyu9TEiHQW)+43f(+VbfEQ0h(PA=?Rh0`10@+q8KWR=*nWPxS-{!$ zciIskf%v#MjHWvr%zE{Z)Fa$rSOvt~)wI(=Xf-Q6-HqNC(tkv3T%149l(~wpX7OCT z>H?HFIx!comTw^T7FS-58D%NW3z-J3Y7A2u#H>BV=|R^qDK2gbdZ|0h10w3jKPz5= ztTbe%>k}2w6-lyW{bz6wEZp2dM=d(y@B295!<~Xt?%gV56|Ped$^VBph+pglD->j! z=lua2X)&%+OrWyQegmv%fJel|#C(Alh}b&ap97Xlw@bqvRSZ|~t8>F!XduJ7MxqOk zmh?_82{eTWh}i#|8IwP{D6@MpcR21zCi(q|O9;c7lh9YE;a9Ird8oSzFu9v)d$AE` zEP1~_ZD|ze;_<{61B+~vi&4VW~P=JOOw?Qly zZsNvF(={HRb$A0@43+}4xWN1%58B;dKc=!OgP*pDo*u@gTWr>^5XS8ozzYh31Tzvm z4!(=KKwFD3#crfP5g(L-y;ik+eBNjycQWs}z+RnebYFz1rIf;W1FA=eJPfcPAK){> zBV63Rl{Q|Wm;g;PcX~0WF9LG6OigE@x!(pt;Ob+rjJ=Nb*4j@uT)_x|iN?)$`$iDr&R(o!Ek zT_(No++4~Ufgtij;4$NDNo?I@zIXv7O(%NSugZy_oXyFfW5-bCu862hx_^HUqXo{Q zPv^KKIXILi{{V9zWIx^L?P?6XOd6(~M4y68xnU-aRa8_|EpNm95le8wfh^`2$XH$` zz7jcb2j5d~eyNd1;hIcdPYyi7OnLk)6SV5lQe{^uNcW)M?_*6NV!C+(Y}O_5tAc{S z%)&hEY>|W&|5uX(yR8d@Fp=Vd_|oiM(QTP(W0=Nfmq`&?EX3qO!}Yw|}Q$R?f=OlG;i|MWyE>ED@7dzwIzA9B4$_*BgCv z@wK^qFMH<2$?WEvC;y4cCmLGZqqxHT;wNs6tY8qhMyZ>{Uv7f^+xgwXj4sJv8YJ(>P_?QJ1lN8d<24l=~Bk)Onf2px*aQQ|?P7a&I2o6G~ zkb{mjugvsxE!8BQIH1fq)W;XVobr31h-`js7`CjD`ZIn91TKF9GZ{`QjLE&<*8I}a z5@yZ>TC+YaZ{gTxRZbS>Z6!d%&0`AM>VOREYrUf~vT5VZ*Jm0lpqxJmj}$CCun;|s za9V>}ZaX-%!CXs2=y{0AXWrho2=|Jf6}+y!wL zn7D?6_PamEa0H$n_zDQgv4WUhWFoa-f$7@-2=-1esR=_cZO~nYbt~*Jejg;QA$$)G z3`hd^6BgQB=sCED32qE83aK8p&1Nyopz_0f7-A3FmM z@`-~<8BJ#@n`txSSM8!DW0&AZ0qUbK=B7@n z>bQ*Y^+pJk*b1gscgrGIscv7G9h)RFDK&NN{$m=<$nKHtvp%FJoJun9%RZr(>DYeI z|69-?s5NBx4HGUBfez~r0cKu2<{m>1@&E@vQ6eq7dSi zq$6D<(vXV@L+DaTqfqRi9?p{v-JE?3!k)NcaFc;Ma2V7Er)S>vTIR;Z|8fC5@vKfp zZ}Wpami*+OU%#9TGx0Ccdcja=WMm}F5xGGA6?(RthiP2F0oL;Txl9^c%Qs>KFg08oVKHf;I%HT%yNAgaAZJIM4q; zrU!pSi|vD_1e*$skUG>u%0P&)9nRg@Jp>CGNa;YY9OkFy-Nc2}6H-!2)6al=0ISUv z*jgg$>m!9R<>dvOJ5thCJ&Ewwp+I_vb1IFk=kYe3jiu#3v;kla0Cm7cg;YPcRR=#~ zcZdCh>j!PKF#79_Rn9Zb1w1o~Q4~?-ox;^`mfxPDxzX7Q%thDgC-42K9(@1)J#03U zj`%7PNLT`Q2XjK!^-e-)c1vCILQk&AS-5H(1Z8)J3&eTt%T1OIER$d8B&1|}Wa=#C z@4ma%;Wu?}os8jMV4>3fdQpc(`R`eKQkNCA~b-VZyIaTIQ(R@Rqk@>J!At9ZUwgX8x(zn!1Ot0*XU_Ky*nBnB!X~+z4!XOW-XurYA z+O_q^GLb7jEj!ym+-7BIDc!7E`%L`m@6H>y3*&9xkqY*JBK7)sSr_$~@ZjSyfmuOD zF~`qDh{VuSFQDw6uwfoAu0G<0UPWg6H_mLskmv5)YHA;gg8#GI=$LMdckjlVerObW zKx+A7Q$6(t?K4UmJx%A|)`ddYA-N>WPsnLRR%_dR#;d+h=E`0FL{yqtA5JdP;%xlR z@MxgBZrrZ>$^6*|wAo8;ZGC+zi}kx^e1mtX1Lm96*74PQB^ zN+Rf2Vl+`QKghH6o*Z3C2_gsSpccOK%?;`?lwq4}g22!Drq7#O)(MG-Md zu{CZ(8JsL&y*c}iZFz@{jSW`}QtFD-ELewaWW4S_k0@!f&78k4F5(!e-j+Wf$+&VL zaGCdDc*1brRjsaduc4iuDIcf!8GU2Tjw@o&3QAlN0pH?{Mjo83 z6_-nXnYr~LS5JSnfXQ!J!d&>80*c3#8a?{%mb0j}-5K!*~_oDFI!Yb3hGCZ5O&%3KJG9evP#q`UvIXTwDR~P5ZVAM7W1kKoMw;et8U&s z9!Rs6mi|ug`x@UBj%=;>S4FzrbiR!iW|ia|-bS9X4-R>kap5)KAn^X0FQR6-n0}M! 
zu5(+5+Rux8{G@hoNq#^bX*@C6Tb24GQJE0U?$pjV6)u092u)J-E?9-8X}=dmF!Ieo zwIeB2MP2>URLDwBM5oF8OXDX@GhbT$Jz`ulFP-infEyK+AxY8XY_q7 z$A)M$p)B>_2d+g%?W%M#3!i~On zH;8gc+GO&AEDHC*qIl?zU$KBrH|Vyc#tgX|`zK9!@cNopvn}qV%!#iJeHo5L+R32q=HWG1=oyd!V7~R zE6H`d-_{%kEt12C1?#@HwR&pd#98aj38YaX$4E=<{qm~5@`2@qklzK88g5bwhMqMSZN(rUldsz~%=m&TQbE!@766kSTqB zvI`$UHwvMWSX00Wp>_W-NUPZFwtUdIx5=b8@YTjNrD>Stm5Quw?5+vMrOml&qyJmV z^`UysW>|hC6FHh%ntLtnZLoKo=AAUHy1v9=c!Bl&Zgx~szZTc=cKT-TwaZsapZ2&l zOupF>|K;s^=03;AgHu5j;;D1CkUn$$y1>@fOzbp5AoyM!>E+d>k)p~^au&aomAf_s zh198XzKC!N2%el9bsLGlF1&TRXX&=OXqen1c=9D0jXRUoa9Q4(Y3AqRNZv&=N4m$e zPaiQ?Ytwk{Hzham$0nqRc)yP?YOZOJBX!(1VFvCs-(*J$KSqwD%!F#`F|Z>@N(W23v9ordOt;Z_L#2C$@#6KszT zd!=o`{iFo6oePyyAi25W{{5gciV&oA92?mqpZ>(NJ$jZ89P;EDWj${hX5@%z8CSfm z0>2TToVD~b&h0}AKHarAq(G0<@Ayr-Jvu76oL9UdaYe0rck)wIapfN)cXe0iEOpRg zv;K?XF5poGe(7+iJsMl{YBB~+c7p-0lB`N$bm2nOB}8@!O)+(Y)70p{I1shUC#m6z z^8Q_^V%Z2;97br_%TrrHt*=q~NXJ1um@r(+ z*qB!(ZFpr-Ttl0p5auawjR_ZCEvmV!KE_#GUOw$4YNY|}AKj{Sni$1Jjm%&W)BXi-V*Ym#e#xy2qD+C|Kdx2&;8MsGl+l2N4+0X zBZ%t~xCZG%9VzAZ5~JsB#5~fz;ow}0ZQ+w_`so#mh7E~h2Z`Uyh&K?fmHO;%85%7aQ*=TE%J_bJZ-yV<9b=1^e|^#I zNWxF>uMhZ3p+ooqUKlB!Rt)_U^Vs@76Z?K<;0vrASf6BPt3<$7Cs-c)p+ zptlo@M;fJ+U&UV7=XE7u@OM0 zdnhMhrjsa&81Z*=E-x?7wQkA?swaSkF#K8=zK7zcsL1-$b6KfjX;3<$?^Vy0x-KH} z2e!CPq6k=*LDCJ0F6g3M7~X|aBQYf<53C+R5+4vr&Vikm4YS#RV&&zv`)~_ijN;Z0&3{=rMknV;R+j@7Ys|mssoJ-a1nR`AahXA3Di%Zq7M*tCcjz8 z-l}%;B0wamsi_MXitea)Fa#yY_bCUI>_#CKDAa;k7t|(>vvnbyof`pmWBm30qKOXu`!qnA!lOBd-UiUJU-~3golU2hzAk9_!dA9D8+54x+oOg0XP9qPq&tD zrmq_Q`1dqc0D{q`ync%92L{MpE@sDGDV@`vK?#WraMmA?TYjvrX1qcT5s zE*xz2(FJ1j;ORf$0!CVGabs;E-ub3##wTvKFE9U`|NXW1GQ+?*RgE?8FviH;vsan%BS|r_TCLw>KNsyG8w0(o7h+4w z75V=BXtEt+fe4PS=*U)6S6SH(Ffw3QahNF2_~ydT#|Qfm;BHteP*PDZLc;j*_ye0#5FDODV9F=Rp(|7yC;w$~J0E zDUH3h17k9H;AX9UkZS;hd;lE?vw*c?w0!jaY+GwLa=8$=F65X6Fw~e>qm)F;&d%;m z-vH<<#^o8_D((40&Cu`(a8lQAXX{@2|Gh=`TPAKxf4=22$n|@U#^jzfy|}o0NA>te zUQ)@*dW%WwqwIm*lT6DjQ|G%W!>&9Tg;mF*zOIIQ>#ej>b@jS&N2(klA>Hwe*>|@T zWkrxJfHst+hc)`|W?npoT_JcxfFam?b-1XP+#fXmfFOhR`0rQGiZ_MpIrlBuu)=e^VJjo3O&;y5G#x9D5C>PJlPHAOCKFeclLD6@-L@n$N@`KwG5e zzBb^!ndb_FR)9W*Ksi2QNxuockN$x6!;qiw&6~3h^*@_Ub#yWkQRL)f{XGCJz-y`5 zuFKGW%6;xTYc6@){pF|ao4m)zL$$qi^u)wBvX_b0vc9h*U=L@?d3OsAPkUz6wW?)@ z?k#W({pQl?&3^Dh6fr-xIv2tP?Z>#D&a3x6o9{NgGncumSn=?t#Agx1*6W7xTB37D%ksC=k@!TM zHpz2^%uyCoYSUr;vJBiiT5i9-y6!?6B;@$n5iP0qc*`761vm!j2c15A_;BGu?~kEi zJBXZlp>5*E4a!WxPH>chQ*V-ho~0$5RJZ67RWZJ>h#F-@OZ8<)NZk2us_ zzP|YeE}jq@H{2ej&|E7CImH9t;5%;}+u96j*W&Cp&8mZ~y7BbO%PY!<2qRI(_2Fp~ z;R{a+xwZY}MTV+GLR9wy)YD$=6u^Sw@@YB6!2MHoD*!GfGv3I~$@vBuXzAE>yCM4A zl^hMb)1yCtj+q%4+~T?sdM1UA)PX%25YSv=4-4w;`Pnh_f_x$10uK3Sb!4%s<#)iZ zn%*;L{R}-8EVLcW28S$+Ju|xsC^I-iQ0QH(nQ}@WEm413Wl-}SV#Uf?-rM8El=A5_ zyf)3JxoX@hw~!AaQOLKr1h)v7M{mm{_A8QVEtTKWCVhY?peQrV#x07tc&k^7&8vt# zk}7~iVC_lB&s|HCAH$1^>1yU=#&eg0qCZ?e{^iN>{G|4;X;V|=L!iX{gq{rC^$O!A z4?yk-(E1f3;^KbqV@YRcXQ5z$2G}G9#i`}(btmeo=5-W(`nI+8x%wdJzy16S!i60; zp`oF&C=hakZK}iWTD8d%SSMA{##H2PCDY<8wy$Rzv zgM)83nu#98bm@yCfmh0%e!()2DRg!A=$i<`fkb1&Qix2B=EMreIp6-iU#gHUN%f z7?Ce1D8MvYdU~3t{Q<8%hE_LR7<4{jzHaaATqGf}qlQ-EH8|P_02l4=hmkNihu{gd z0_>l`tw*7#O<@D*m(j~NApZeBHg;dQmUpaplpS^2We&#s-4|TT_wGT(x)#{p@ zFK7;9=S%c!<+W)nS0)r9MwQi$Vz0 zKST9XPP9pQKK9ns(IxiU6#25E1GVAI?<5`Sjnddu(##|JAm@2&YNo3TkEM6FRiiZG zP@bNEPW|VffQpHq(~e|6*1RPC1ibB92n(5_P~4WPh`~4RW9Pt($Va?o&EPQY#nKx^C>pG@PfK>Y4|Z*NIcgBemU~$}ZoA4?hsk*01^X zR_Sn934w{jg_y(i9azISY$_~~pM*EK3DRw7(v;&1?q_GR#aU3de5=y0Kqu=`To}@+ zm~azFM9{Ez34}9)1;E(rv9(CthZfY@6c)nSCFrblwzx%c>QCO@;&{Yb6j2t`xhpi$ z?us#00;Hc4yZRy@D#w7&hZ4CWY4+~BdUNah&n_u3cRex$)&cLU1rBaVjV$p;;>vIL&vL|^YS=BgX 
za?8l~p($5&q92lC-;5>B_tJi;pHP5Av!>J6@)BaPrwfzG88@L%s4i0GqsV%^0LGw7 z)#n3Qi>TNPjb7gzzD!*+X`c$}P95xZ5-TqcZ;b>G5FQmGkEI zL8e=^mh~sTz4xMv?T^lPu;Zb>#F<@yOwEWXi<2`J$w*@!a2YRi<8GdmRIiB;4Ooyx zv!QW+QssM#SO-1Cky9fOiLU0PYy0UXVG`qLbU6dQ6X*z$OkA zET@26U`FGnDfda3@Xs49SzN#Jj1cEy`11(|le+MW%8y_24umty)La?fq`8 z>2a%3SXW&&jcq`v*M0c3P-dA?U-`@=jqUVcQ4>@%Yrfw0v@l4Z;tTlui53Lz-FzobmZ5B?F(cvob0|A1M(ReY^hC++ zmsGDlzu4D_EWHwrl6LwqL%+TW062SA{~!6(jiK`+f+CLp;t%(^vmd$w4n4(#U)hp*cYS`VE zRZ^no`g2#8|8(#DRyWcMJeyQRqW8WZuJok(v;;$u4P=n;Hr-`K0|SGst2y^x`%R6J z1MMlY|HbCehuyHO8ng`6Fgi-ec!lN4~qqY8AcF$7FZ{?r=|?1A^4`JZvQ7Zq?q z`Z`zdxDPxisT)~3p8=GD^kr0dV=WGI@BaA(aCa`Bk-9HM5gNioO$#LOrB`+;4-O7q z+dul$X-rb#$RaCsOyQh$gUvG5Ufxp??-R&R1@}Hl=T>Zu@ zVO!_^Q`eFc`Py4@v14oCPWl&6JgFO`|NZ5wimdT0==Te5D7i}gigjq*5}?DG#d9?L zF2#JP{Uu?5!ZplfSAnx6jg49zq!PMWkl3v_?ftcI+E#L2OhmAPtT__GDi-D>fd-AbceD}^=k_{gf;WO;^PA!<%WwBG>Ujrv2RgHGWzDITGQn>Vb1Ewqj({cu zBe*ky3G;nn@T>n&`_cUwXFMEAo<1?}u6<3m`8O zb=^t>2<2+D#mULZys=^CpMYR@_Zp4Tx+$kxUXX~0>B4%n;LoT7x4UZrcq}r0e&U#4 zDu@Z>=4#p$aOz0p@D9kG^UP<{$!RWMI4J=V-;a-mo*Z;^zD&AUPl1B`eW0eq+dbKkcp*;PCwa)`R_*H6dB0(+yiwbdY#QV~cSFG~DL_C^$naQ;og=szf6HhO7mn2Ex^J|9UyZuG{< zu--wBpPxUL87-kpQQX-0c(D;$`V2prU>TI@3TVQ=$;Eb<&x;RP3z5pr-=pT?+INJsUU({T{)!s82?|qu+=Gx8#f;4`0Ol|<%Jv_ zetvoRjpN`PTm1J*Lr>6P-#$BzU_9-(@ZWD8sTytZI4mp-5?m*(kJT+v$f#SLEg7z>FMF?SA(eI@eb8zz*QUK|w(f>1pWci3l42%)`OP z=FrULMal&QNm+*i;4;aN15W~4w;(^42iC!0Y)nfl z5qbt#P+=hmNUnK6$Gbb>GCm$2WRh_OFLcUa-o>GAN|<)=-2)s&?Pr2k(Sx>_)~}!6 z0jyWB-pmF}0jpP?r_Ik^u+HsC$hc93#0O00nw6@6x1o}tfivLS}C|E2+_5nhj4Lv^v_7{{plWSg(9M(-q z!IFo(Y{H?dtINvD3P>=tG&D{AySDKK7)N)2c3Qr*0kEVELx>C#dniPm2?4m}yb2Im%O2n8YyGZ5)LMO|iI zgF+wVy8oVx|ExLgUN)7T?QMUk(OkjP75e*~ot+Sj0YU;Ptw3NO0@aKL$dfRstUi{; z_GIt}MAW0>V-hf#wH5+&nU9CZ5!AII(K33Oi2~U|2qRFzl#uq zIw?MW^u92l(Qo~>3sj3?r!0ySQ$=^(%($9+W}wxy7ydpq@b8JJ{pe(Ft|CoFb~EsF z+ZzM^WdBb9&bUj|1@hsagiV?}Yyg7=3}$upeuk>|`C0I)28ORaUS;ysg1Tbj;`aCF zvJp4o69EqDNxu)AGGLel0%i`8Hrwn9J3HtY2SN>oVkMJfQ)%{GAub957JnH)6 zP>38`+uPlZxO5f6Hw3T`6g6KoHX0P`Km!1v#tc;;1qhD--(?@Fx?etl0RaJ^v}^h* zrtK?U1eQ+?Tcj3=|8E@G!DHdXOFiZ)Y|qprBQTrbg22w|2dVHfjlRX z;{v<8{p`rm%gaknF1&zKTU(oV02(n_1uvj*6$YxtPkdGu7O?Vd4m#Wc(i8^AfHlpd zU)cy$EVwiI*duFRv`Ia%>|m7wz19lT)QFAU+TDd+Yz+7e91LK1%t%gdCHl19$Qeq_ zN_|H^zee5xjz?txf}qjp{QP`KJ3tRUVnFYOmc_ru!=SPTH}*}3c_aL4P}jsjEub2o zr1S`uBm={sfSQ-r9^8=OXV>9_<-}9+;Nju|-7{vfQ8j~mTZ#!1Qh-dMsJ6AWr6r2K z8GHm;!oN6e>DY(eAT>3pmX;P&oz>X13AC2qR{Igz5bttu6c*w#UfxvHX$%;)Tf-~#`heF+E7Scd|2 zB)kLG&{Zf+Rg1%YtqnOM-?b%udJ4-f_JW0w)G%QwJkzGQfPFL5hAX~*VBi`xQT1-2Mrq*d-q#pq0@-o{ zEvd~a*=iMb zwFq;2HtsSHdS!sT96;z4Hu7OtEi5E6{R;iTe-rJ0K79YBzQyNsi3OYw_f^usAak6t z^XV3t&L%GcWI%DDRxU}26&cQqgy50J_JuiZ#Xs8OnZ_0Mv4^5$athkkn1l)v3$SET ziM^a(EYc;QMX?4cj!b;P^Kcz)3Qlvj6zePj;`iAC_v(!0%#jdw#>62)NUx_QKiQe=?;=)Db>cs9yd0r0YB0(O^(Pq7yy&{y&QknID?z zL8Uwk6I#1J_;Pc*30XwESK$0K{@rhdsqp`OzFLQRx!-TeqF`5@uC|pA>%KCLJ3Vf{ zV^la%Wx^v^gBgan?I@FJ1Lw}CDwBJ@cpwX&1f3@Smt6n31m?r%TQG8y7R( z1fPiCAt7{oc>PHH^Sh^+1b#O9RcdC z5igad=8C@~Ag_>`-FxFS1sO`aziP7F&E?yE*Ds+vNJ96<63{iFf2^MCrdE0v+T0aT z6sAB&jOz-9ZLeQ&V$)k9VfK_B?n98hbvToN%fZDiNWJ8XF;} z8<66M8f|`abFN|Y9^6eYyCpyvKjyW&ZO!k`JVPHvBw}7fa(xgwJUu;c++dJHVsZzN z+AOtOP=N{^)2T)mVa9Fj?L${6uAy(=wt5B9_egY}L5|b>E)fzgLSBw(b z{<}CcXl#OOd{p}_H8t}objx)KOpSr=I+)+=vmu8}Wm|i@k4G*Cp7dDXdL+I4>gBX9 zh~Wdj_#E0L-{^5qpR~40WtV^$=<1ZcIo06{fHfeO0wVLxmGz@9ee|;a#Fxpq3hGkX ziz!@K{@D$CnxMLZRzTs(qUIUy-zD7aax5Jq=#^PVDr)=X#w=1om2-g@4{9t5ig$ub z77YZG^TaVYJy3FKQ&0oN{D803hvApRh&SJ)wK*!G8+)T{2N(6s$&-|G( z|0SzV3xUc4PU0AOct_fxEe|iR*PoBo2E0BkCYd(k-{4H3C!S9HlrS!b^YB zt~qO47gvDXbvNl;^P1l9$z6dO$Y#o=q3ffCi$G{&e<2grxG}UiWb_^hF-ZBL>iVRsjb6ga 
z@H?A4eO$V<;{DQ>X_v|C-}v%r3=X3OHvuA8+!z!~i{ipaF3*US;hXB}hl}Fz1zCCzvee%?a{I-1d> ziljUI;;9zYw@!ayj*W-e1mWfa)JPZ|AAoX~p6CT@oPZ3HA>N*-J!{}S?Ti=i z|LUOnnNkDUR*G>$T6YoyOP)^fx@QZ;zgwT513BK;_5Eh@;z!$Sz>(~d=|rYJNl8iR zwtO%SAV_uzG^$|A#yXaF02W%`OXm6_`)RP3{}Uh{wc`I9ZI}sLdO!fAk8G(yqPLG8 zzAdgZDqfJts@{lQCA<>FLD0eUK2~77u#6q8K%tmv!$6E*^z31W>=063ma*TR9@njw zSH6lno<(hfk{C{MNQ98m(?L8zn5A~}jYP?! zn14jQ z80CwF9p7X*?M0vJLGlAQso$9Z^9*ykiX1afe1Xx+=8sa~iLIX3e*@z{>jMitU zdvDxG&_U7A5b(RqHZBZ_ziOLiJS&3o_K?IZoK`2kz%=C0#`h}L%uEE?X)%TY^K%pv zn8%9;YBBiEQu7qWor6|~)N9Dpn%^}49+zS=zt~mMpxZD{690ojam1k>vVRK2RSJ-W zH+}KuPMIz+u3qW~1_oXu0b^t63TXWc8nb#HG=oE1YZ%{u_&Dzy-Dp%neR{wET}=>)I| z49vz5JRYb+LT zuG$Uf@?^MwvO2YBdANenF=K3UneqjSQ;M z7=@imrSJd@*-??#?9PsdeGGyw;mh<~L58aq15cp3+z5t@YFnYL7Q12YTtBu3GP(Rv z=L&oN_(*c|AWX`b=h6pOT|=-`TwcDg`PE7E%Xh7c3ArW~3aL(xI7qlfX_Km>*wx&xLF>#(Gy`=B4CUU1>N|GV zoaPUp9OB8K76zlO9FGV_4f=MN)x0EfP?(NKmuaROwi5m_|?n-pR7JW1cHLP8$U ze%5NTubTprtC3MQn+i-TegGf`qgZaRzhT7xYQg~^99-G3D|nvIIaP~wIwlrsib_jK zVAKstEXX&sDKHBiqCJ>BOhLEQToqVsFxkWX(I0pz4|Rcy0$eW+_3WAU&^$~&ZH{%e zd-j{xwf{c7|LUiO%jT1}|MBm=tkhN(Z=i04Lyd30wiB0dg$B z4>#PzLtuIs__)h3To!p@67{Tu&|rt2k?|%#FpxkC3kV3Xvq#xX?d|PBJsu3hXBN=9@(o1SqYVqmFQNPSt(_O5Rqih zvNJN0S+ZyL_B%iK^E}_gGriAVn@+T0) z_P9I=SwAeT8%fa65(KjkHtyD5hE)2~-RYnBoXuD{f!z~0&i(lgubIez<=FKXufgD$ zBe2kO%MDxP^fPBLxPa7o7S8|>761|Bw_o4c_)k>!eB$B{!0Zj6VDz62|NZ-Vw4j@B4viePz3q%jpy+&}QJynJ z=YZl7pA5*~&^NUb_cN7Ga9Aaly;1eyi5N@ErwM#4@K~z{ved^&x)l?_v5Ba>-Z5H`AoX*apSAUN3m~7SX;H&tPE!sy`I}IHAO56b^z6W;#Cjtlx z@-}J$T%>gUya|llaT>{6jjvz-O@K`SQw9yqpoj)53BRKY>pW0P57h*Fp%rVoxiHq6 z2++HSukocwE?lVz#H93VnTQw64R;h3>*2Wy;5mp< z;?UtUPi=F%v7Z+m%&{X!q)}|$1Q!})E0BA~JJO@d%634N0*jUFm5PvQxiMhWu z^uxWWkPX2_8VBwqFJB9|2%z&Ko@>S1*V&(p!jjtC+u>J@TRPmjJ%U8};=E;$Y~lx9 zZrq%8C^!S~22p2KoSMPkg|2`=tda1f|E=%f;LufhchM4a#6f=Jr-ky;@+gJP@@t;a z_1!XpcI7AogECoxyU+&EZm+vP`b(_$?aSP=XAiJ5$L&p$d++7R>Ker~@k8h|jW(IHpawk0V@N=kz0jG}!p5WgIt z55Nv;;$PsSb$@oep`j6$mE`~^59(h)Qm+_0ZY@EfY3#M-Qq<=OmK@N3h%K=n^1PT> zudU#M7=^pl-HCP%7M3JjY23r-7HiPpvV~hCz@ofLVSfjb3ofc3WqQXgffJZk6Jpt& zVa0$derQi276xJ}ap6K9>XGc<7tft*Gvzb4u)t@02f7L{S+VAe^oOjHGdphmt24!d z1}8i+L_!eS$&^0=O~Yay@M3TZmB@a%mP$X`pIr)V>oIET3XQ*r!>X@DwbMIz^m2#= z6G8sFqo&q^^UY&aNE{K5nhDGWdcSQYy^NsVe221>6t(^2d#kvzilxcwYK4t-b;rJ` zp`?tX1Z`jT+4}E&0f3u_x3@_BLXMqPG%H!oDtjy4!S8l#uKPdjsh`@KwEFX=o1obM z49fnL5931Ub0v_sAW^|wZ0c?iswrJ=wPp+lm)0k<2>uiJocfuaXbk83@x0KEqh%)e z#RGpAYqk+M@xw4|NH{`q*jY4Qmls2Dm=#=AQu^asieR3CDTV9&A4D|b%wz=nnw%-D zlmzu?6HMQHn=X4$58X=oP5DsNfzCBnjlt>1^hlOEtZ(a3*QfOJ_9hBUz_j{%TN_4B zV1*mSjb6Td*^E?ajQS9@moF1~uk;RR8o2cXjQbm+c(G}fL!i(96{DHW)ja)7_Wd9N zRo%o|<5nwj3X0}$-;T+&n(|3**s{APS)W)psj?6A^z=j;2Q3rl0?7Cg`aR7a2qXZ5 zF@M|7&H|msC;^z#L2)x3xe{ZF^dV50U65(; z?WGLp#?F}woQS_C*!#H796R)`f5NFOa9NY@X?pJ8`${PTC{%pC$Ma^PM^>S1iql6G+5m7>H8cC=*Ayw;PZDJNaa zTW0I5ePAbMX;O$%8q-Pj^Pk>cJkIJ@o3pt+NYP7ApfvoOpxn%|xeXA{0Br;&(D>tH zHeBT`S*+*Evh6GE=;?6>$sU&7*8g&hF@ymPGTyonbubjq;7o0AZ&N;_rl!{8j>J6Y zuvmwy}_4EApcI;H*9 zwC8VrL;;R+MM_EvnJ*5m445041%CmTa&&a`fNwdU5+-XZsG#}mhC{gOk9T} z;DrL6p)RE#W@b1V-^bsu3K73)$&*!o{`DE%0pAwL@dK(_kquCOVt^g=*1E)a7QKYC zD5CSe3M(kTJ8rq4ym+UR)eoQutx=>!yy^-nFp(g_at0jz4RG>3HviTjQG~JVo0pQ7 z2Tc^{0&^0s0^H6OAQz`0_oI|Y6O&<(|Gv*Yh5Z&3?4S`ZudFmTHwV~85n?d2%%fwX z0?MfDh9Y!~$1&tUXrcArM^4^8xkO$CDS(88#4DA1IH*+t6omYZ#D`x$gRqa7bAY1T zX&`C1%Bv?iiUBoj6ZVGT3`vtA7*jKw&kZuG8uaL*(?ZQD6`!|}AO3vN4R!-lcQ?F! 
zX6w9$=-fIpVx**~$(R`*2(&jHc+wKHS|EPwRKtI9*s%_g>%hqmOq{m3@Mk$JQW z?~ZFHfxX_PkUg;FDV~!t7^5)nLOYWH} z&dqY@aM96_Djo*8>Pn!1a@Elg3tD4g`a`LmXnLT8pdbBt|L_E>i)deBGx14|#Xz3} z6}8!c^hb0*@1Hu37Sd4jj&iivW*5JCD4(y5Zdb|YOWdhvA4OBM?K|$vI_F3E`S5-3 zxqrlJ92)PW*-kT9pBsK2%AJD_I-zv+w^Qw@uJ>E5O+9}X7Mvnvw_kCGQ4%Cr6qjg} z$i_Rk2-$ze@D*i_!@#!J{eQ`ixV5`mX71pY&h_AY93g;}fBiHG$%iaWC+j5hAGmz5 z`H7hYtb^!rNwjohZ~{e+MDW9q_ei4ykDnVZsolvX$bKZcOxqHKTGmUhuzOl z@);#!boJdZmWm2W+kL)+)6)l^yumBsZUDgpCMcBe(>iWN)4bfxK}Wu7l5di*M?%z3G<`Urh60>!Um0< zMH%Rae%iLWh;j$rRo>nwH0<_$shs(y%cBUj{rin3&xJD39mk|F{5K zm05fz=*fqws(gGEe+(R*nM}KO3)R4=^dH5OZ~H0dEBS-(a&}n>P}=SMau255?NC`# z&J-edxp95W`~A=_I_f~Px`nf*oCZ(w(6{~>=lE$gD{HE`jOXe|dS=t?ZZx09bAN;03BJ!h_vY zt^K}=2v}BYSh5bN3-1LEa*AK&s|^=XcNXMpppIBQHnXIoYl3nMV)u>L_4C+MGwoA zw=5`AYENa}e9j~;KE!+PnTMAC&r zKneB$4^A}~vNAX-JQ2&7e;B{5#<6esK6v^wN~`K<0MB@7Gwk~koopPoR@M<}-+!EZ z!*4?V^P{i`*UH{Ose+R-b^dN2(oZw*@ooQ_m%XB{CPOAg^T%z6PVZ7ZHIYTnUh*i3ppi?&ik!v>KL)GI3k?|pB40=;=Xe)lQLlb za;lIYMMGXY!^{}(g7W>a+rU-|01TC#9e@v9|t@*z-j1`k$suU?w^@XC^ zE@Fc|NE>{ZR?wxSN{I>k!{N! zTxrdczC+B##_Z@mZ$%kiu&nCL`tbdi!4Q?o-M4kq4pKk4?Gm9#qT|T5GGo!So|c1-`ZSO&>bhwVz%EHI5T&z{FU?xGPB9=L32V`yYCSzySy0} z%*YdpZE8`Y3zXdXW<@ClRG7dD6KAfbW~+Q*h1FfL2NkdVI_gpB9l~<sK8I(r~n zxI)VJil1|G_dYm3$l#2qhZtpPz2=fYmwnu!fGShI@(gqf?F*Nd563j!l%hlhdvZ(G z?NYrOy~s@^k!_B86q{}5&-p&|)zml!Y_5N67u|(hG>6q=DCak3Yu=lb{nZ-9fSL7- zs=UfcI!ekGP}M(vd@$3!migdn0%E1~wF-?iU}eyI&7-Ac3rLhf(pUO>YF;FxLaO ze-Rkg#Psx56o|dI(e6OehlWq;_TKn5u#2$r6IC@mBcp@B2@*>SY_RuEulp5)HrY-48W*B2L#>)aXh>^ip=$9TH+s8n6h(o!)TDN#u0V# zoa%Y2f~uShfcsihoz6Sm@1Lmh0XzeZ3B;OIQjcRk51Ggg9OEWCa9|P!SgyKz@1s=t z_)$P14E-*uo(Dh;@t1FpO6;KjQ$vaQT8yce&HU~he}#>_7RJ+Zu+F(&{TTGydA?(t z7Z?0$?&D{G3hgB!0h=X<+Z{O&fR@aaD_3M>S_P&9y*UCoLF}Jg!c=GIbB%q=zJtDK zjsP_98Wmz@+zA>|*O}D-)m!z?IZ#SMHOD}8Kk`jW2ZM#hY%Qp9D1k8!{-gBvWt!$& zf+j%=^iO%9GlO3MjVaz<2Y!!QR*pya=AC#62FZUbpW5&@?sdRB@vPfHblvGI zg)~LCvKSy&*U$bl-gEq5MB#(Z8Kis%z=~QG6rm!t>Px_y09#T@Jj1eF0xOMETTw;D zxX{@ePZ?}H@TF{qFR;XWxgJ-00;d!1@H%YlCT(*1FVq9^W^B{fe^EWc@5TmfId_fk z0^qd00tA3B$rZRe*;h4I2GeEy##W21r1Ug zOsdcWMtHp*fgc3D*-z?i}u}cy8MGoBlB8HQW7TCL%>sC0ek7Q zhyBur*HnvFe61qHa+0>z*tt4F%XW;zTBc=^zmFrUNRicjE4{Zm7aDdj$7^R>Kks_F z_7fE9_Of5C?~OP?GILMGiy16BRyHY|G`pe#PG29z?b~cji$>fX53?4P1l7 z@m9c108I=%(}_ZfDo$Ha4*vtTDaV`}hE7-P?i69EI=GW=^MA16h}WbP6Q&Wgn2z*O zF)?t_btfzH1e%A4u(rO=1CA&f>$J2qP=-LHJVZw3eU=H5VE6#KqBI0m6kIPWV7142 zjX?n`!lVxvn<@3DR;xe4_F!xPbUrC$Z>Xk5`B7;N6x)^A0bT`-o$W2cZ-ylj*lb>2 zJ9Be$Xyt%dK7RZd%6V+m41yb;tzZDqyJ-F)^emWWv|0%Omgz-Tg$BO;76IKJxIcte zE%xcUt7sUJ9a5PdP2)MErQD#8vdfG&tK zCxEy+I}2nOtl;@O4()yZ=50!f&O0rINQ=yC;Is8&rlOqk9WxOda94n~z7Ti63kos* z%NoWCYE(<}4ZIYz=a8r#pa=8|0SfB*_selWAi<9x3phHo|1F#s zr5~=!h9$wYK>M8*2l6n!E=+YiK$}EDi;0X}M)c6eGz3QuEw(SgQw;5oC+xg{?)M|* zV=v(`9z)#{Hy2Rv5bzK|W0l@Q6IkBq*;3+X(G6ZGsFEP8;+Lb>*UHpi!X<`eGQrvn zBL+y)Fw;Cj{0l;P;d?WN7ft@%TI30sDIR-M%utVL1^D6s&VeRkK`p_>0oJ2Puq~hn zTj;%4i?76qMC17c_YUIjQ5bqQ*VV;bdf;~Z_H9hEz^Me#YbV%)lVkM)7N3ILWB%`X z@r97gVM^_WA(m?#Wc+t=TyezC+xH5`jRH7@!bN$(Dt+gk!Ja2<8IkKOl&yGa>>1B9_K<&%KR0a&Y;thhm}E1O8rw7Zpf zA|rw#gkUuQ^CLV@)7NTN1)BKy1cO2IbLby!0X6zC=W7Q zt&ATmX+M)akMD*jiyyxQdjtBKgS6+aWL_f(sIiv38>bDy0Ut^W$A$H3J+e5 zf!6Y1zKI{9rKMFXDFUud_*;!j)`9d7JXkS;?;I>Eu(lmKZD52;8UMLtVP_S1WW>@- zX?jLf?)N2l&($!-pZX$t!|}_+&X(QQH$O_3{^@u3zG;^_x729d;%Wk+6(@}BUv`aC zD$X1Q?ffq0B%Q44olQ&oe`?Sy;U54+&Q6!eO3HO*n472MsQxWNnvV)3dB(Yf4$|5U zVOhYX`OkM`?lZ=hQ5^f#CO`-WLRKwI0s@OTuy~pC^Yib3;@jKvG~^KGkD4jpkkUTt z+qt=|WAKN%a@R!_f~;f{(;Q5M;RykLaGkmDH9oQC%Z%m$RqFB5B!LM??>N*4_V3Th z%`GV@x#&E`6i9gJ7J8tw{1)aB7N&XO2-t5yOv0>^IG`#tHuk_p9hBu#&Bu6ovk=j+ 
[GIT binary patch: base85-encoded literal data omitted]
z>yVJ%5FTfBaRVPSiiI_)#h)f5C=Nrjsdr2O1dT8okC8KC4%)Ilg4fG!LI z>6#YwAG4lqsfHKj)Wg%Q51-atbT8SnwQ{!!RTwFemny>}H zw<&iUd-s)Oa=Hr|&1tU%eZ+=;iFiwPv`5;pCNN#4%p5XGhyZS;IS0R`o{ zCJ`=-Ui9Zn!ROu@>qBGiv!@Ru4z1-mPLO)6h{_ax_LRJb?Qvzi#wpbl(YiJRfK`!Wy7pPDSP{b0Qh_mW_aZ()<^&0)e znl{uBxH-*eM%zLl$&%HQ=RrLb5%t7c?66wQ+IeP4iBVcjb$*QRRX#if0vOb&5P;en z%9dcNj^oT$%f)~R-q_YSCo0~Mt7xmdt}b#~gkV^DI7m3HaMn@@M-1SgR=70V+iq&r z9hKy^ZqxPqZ2OMafqNvBspMOO#WH&*dxzxQq$pz9nep=O#DAI%|hS~}eLgAl=0K^Iy(7*Jx=tm%0A+q7z!B$+9np*9;peW=3UPC4sAhj`rN4odvE5h=dJ}X5>4LHux}) z>0-*SqAkUGd{D2kffBTcHRnbQi&phI&Lil^KDzFPucWFDcu5b~l4MQ`Eg*--BrkRy zA&7_iUE}95PBIP5t?7cCx-H}vEQ{^@LGjE(KFl>VHnXB8ppM-2sxmF;G<9xp9VU@R zp*&Rl7Oujk7ht-gWIwd*woaO+&FXi-+OliUT4Z!vc);6O`5<|n4jWPx=GY%zj8<*-;@0z97F83BEg$#}HBS+THlA@Vw z)mAB(g@7};lsit$>o5IK`L{lSt!@L)Rl$Bg-yd1;68(K4C#nin1};4*f;<|Pm9nu= zrV1$GN7_6*u9RBN6~s!s5XcfkGJzW#*axJXPmp3mAAvCjIy zAP*r>Xo@(_Vxhmeq3%;@R>Mt+9@-E89}$0DKu+F0ea2$fjD(je{x_4w z)wbRUyNnd{yNQug;BkWM^3Sij0^E2jFpp_UC01Y?nG8d#5)f@6Yz*NjAyrxV$XG(q zmAFmJw!K3rC`Hu&XZ z7WuYU9Twg=wu)WWQJYmwC~+m39UCTw$#5j9SgJ0j<#P>#`{${p#jp*u-`(oWHYyyM z2#ty}t(GJl(bqw)T)2+G;s>{EQM#UAX|pmoLFFz}VJ7i9x5Yni^wG`W!ln%IRgWPI2^LE#zu@Mhjo|B(>0k3fWkGmY+c9)ENO9qY3i|v01nKyM(pEzoM5_+A~ z&-fk@H3r}IR~h7T-gPD`lXu?nUrpQ&RO<8gxwf4!`ae2R5%IF7BCYc+<1)Oql~|X{ zA<3c6mMXy`uq)xwz?GWoG^RR5H%mw%;!u=VOZ)2*+Hd70f>!TOQXeUQ5uWmK$@vw&(@Pl2L_V~r<}pJ6=k{) z7i^E~IXInmza?@;mjIb>Rjt)8^ugNhYX$Gy+UZ$w*vrR@0}gi2LA3D^4;gC-!@~9FeZ?rVL0Ec;0nv1;IucH+^{ngNTsQ z(t6dA1xm+XUTk;L=^^5XNKwwf!?2`E#>QY@8-Y#q>k? z`Myk058zfdkDA#2IKsbg3C5a>qx>_fe>pGns{6d=%GV;^C|D+`m}b8ZhUk5vOwe!B zE#Thg>h;d!<8#}0H`)MNNGOTYOv}Bjw6ivOkOGGk0_^HT&BFR?;}hkZkXSL8;LXp0 zY>w)+o`5gUt7^@95it;g_X$bTAKyQ@M5`lH@Q;Plx}cvPhAjf~2#Q9vwpsq@d9IYL zIoD}sS)4>sd(AIotu(*uUtf1yFh>oOAt=Tvr}hm248EUt`HD%!RkY?=x!JszP4k^k ziYuM}pCQHnl)a*HWq`&%AzX0?%u6^L;jlUj!m-kr-p$f3=PqF2Y06af3T7)q9$SxD zbXTqCiK{S2j}PuLXe0UoG75&^u=Jx~EF9731Z8gx{nV3Or{?%?vJo+(^>*dtQIQO6 zmLtsb;rWh-^+X>l>DKzQcWG>mq26%AObfA{9vs!@mh63a zG^96Yp=XnOt@2!_D-YZLj{3HR^&cRDhlNr5-YQjzU84(Jz&jREMI$?>-yO*x-fW}K zlAaf64j@ESUJQ89Z)c`4XnLl7NKz<>X2|BL2F8tU&2x9e5@ARwLv#tlpU{CpxMbQV z8#rY|jfV>+h(Up*L9zVfc0x(5nTlJlD>r3%x+QkEpK0qv&%f}xnVPiZ|CoC^Poj@O z6q-Wf_vKYv05cJXCHz(y$o>}#PykI_Zqz_J%gnN~suS0G8TUqrmVJ!ZeLs~e_%$r( zJ;12%`tBroxz<%@^Yts>BT>hLj}MF8E<@OSe1efrP+3!TtfATpodrTtC=;F2|FM!9 zS3}^csfeRy%m1oqz597e64PadTb2HA$9sP=4`%e;o4Tuo0`E=89?`#flwV67Snos| z*O8oEXKfaQEv0O$f_`?#RNcPY)Bdj~d$v)V+7Ar8Ul0D|j`Uh=S@aBECfb8zEre?n zcv;Y+mP5ya4~y{uXB_9H)^K?Qqezeh3JBB?Q6X$J)h0(Wx?qqYW*+jGq~PD_B+bp{ z%+{sE)xgjT#0*ck1aRR|BIj2Oy%%TnZZ-0|V8#~yvQnr91gwj0KUWKWwPES4NRIayqb;K!8x5JaL?qnyT=y zkXWHo!i^BQ3KI=!Rap~h6N==(YvzcYp6k}>*S0+J!fc|AZO7ri7vZZRjXbx>;RopG z_=~l>!^`cLrLBl!YE#e>`DKDpXY&u@5zl?_r9>Et7+?{IUZ@}>mfC>q$c{FHj-8mg zSbkKf(2F2TDUz@7l=gqzs~(Fy1!wY1Sc2FNu6^g%nmTrf8Jh2BO}AEADJ)8>%WSfl z|E`7=_56ot{U49scsb+4kQsN;_XZtV%9A71vr3sWoJ5(0ahSt!O()uA@HUkl66BkU zAVrA93VX}p*Fsx+=X5eRS?%mWvqWVZ^Dvk5}(xrSl{=tQt!Clp9`1aX3J zCKJ4IQ34TpY;c_6;czLunBt?(S?%j;{o3bSdC=J|xBckx(V`>x8b46s zYJY4JzuDnZbAV)l_{;GXPSTG1%@i9&_*W8yl1j|HVx6s*G(t68rFf1leXm;T-C*8`E`IdJm0)L7Nqq`3lGn>F(4{P+C z2yWcOp_ck`Nc% z0O@U9)v~*%csc!VD-GU;YcfyWJT{)B$hR7)wA!|kF6`#IJvxnSve{4FkFk!Xx19HI zbpCynWna;+(LGIKw(uQnKT>@)b+7LfzUBITNP4^y+rYElAL_PP=6GyXXfsg!dC6Mt zKAHJEiwm{pu;O@{{`oN?_`b4RIhDn3^XOOh^mnu9L$q>3@y=K+Tw)D2MunRM7ivuB zatKYF6$hm2j|;}^&X&Yy=V=+@DXz7+`DfH|%;*FRV%&Q{3U|A@k=zJ7kaV;Cgnovtj^EVy*Q|H9plA}M57+xsAO=4%mv zU0g54N{bPuL(QVJR#-3@Z{Ebu^q>t4hq&rfM;%FKvoHRNnMK*{4j!jBx(}WA? 
zmk44yn{wr$aJ;P4_q`*yT2&1tQL&o#J?!g~|GLZvO`H|BR=XTDv|{<>-4I>cj+3IN z+y1#7vy&8k+Ey;EVxbmNM>^r2V=qD#7%GV$)AVQQBWj=%HXazq2n3Qjfd!KT^jhZL ztsI>)nRdZIu;FbG1*K&@H#^%0cnkrytm`rr%enRZ&!gw9{k#Tr2C8^|OcXqxWnZhZ zX#LGkqPmCtoP_yug6_N-#;a{R$^tPgw%yn51d|0{9g>KSw?FbE;75nQom2zDt~!;G>Zk>hp-$CFbAZ1G>5tmA}b);dd#=jUk(0YZr4;)Gj|{hrLp zaB|@c{01Gk^jm6IGCDQ{Ub6fC@{#st3!E`v778RYj20TIz{>m9736~J%vKMZ)C0@=N_vilHrwMgNhRcc># zJr$IckTe=C;6hh@sKyJv$A8|r;i+37^~*qt$`qS`^P}`fj@W0MqSHJ1yZ?J!zruYy zw^jmK*)bp;@z_82=)D0`2_&w;vc&s(mka`Cog}OEbNtFTrGV{QZ62S~ZywuCA2oVD zkC73q#s02)tZ}dNPhH>9<_E=Y7&~sqALtt01!4yDrgYsex1O3EHog+${g3dry$#)s zcFSXJ@Z5Jyh3e4^HvP~2P#OYBfMK$@uZ!;{fAs6Lzg-JuH9mEx5krhvQnG1#(-#!X z7JMUKkje_|^B{`~8EJ2f`w;{-bXN0{jr^XT%u#)LXS_;7JS7JR=h$7ePfin6cr zw#aq9U}yAIm=9vdoHCDKHX5`ZRUvs)Jy-pX@eF2zX5j)D|M_m2pdEyeQ=NkfJC3_V zK8+DY)vfqhX6Wzl52AU^&(DjAiGg^l?Vqpb`DfA}=PM0n-%;&AK;8u0GpeI7JbUcg zA&PX6VY*-!=pz9QI9s6GD8>d7=@1nBYRow5K}E{DygDAnbMOUPc$~0w8-t+HA8!F{ zf`WouR)T$X$$GC>IikDp?`&qyXOw*M++qxGpiI$olw~ulec-9CxNAx&$0g>;GnP0J z{EZ;tMQWrUCw{#g)u&5Rr}|{0T+s7*l9i+LWx2+;?~X%`(}OM9=WAl!;IZAn%N>BI z=9k5DY^w+8+Cme?>5qW!D|f&Gl%Yb)#%mH*q=cEZ_1z1Vs9!!kw|3AH`EN4vKP_*2 zI$_%~UhvoYOttmgW*zh26{o&EZATyCj5naS@BzSMc{B2%oymto%l4C$A%tmmhSKv| zfSCse=byfb7&|%g?pG~Dgh&ZgqXZhipu)9*`s^8vFo&auv%P4*U0rh%_6$kABda1# zUh1TjK!uvC)_+*FzMtF!pPh}bWt8#O-|l z8bV;fCQVRfNv;P_Pjmb&1~*W@aM+Fs3A3*#068si`=70$kG5R z5GsMw^JX(6D5T6k5=lrI)y?iAM|7XkD%IT^Ly91YAUc}NJ4fG&ozXi#xRTM{+M>-dU~sM4|eGE#Rr8hW|5y?^!84 z@o>e|f0=h{&$VVT!s{&3_OFD;&~W1M^yH+&*OKgvz@=sNk;$Ti+3CY_PfwLKw6pA` zc@l+FsTtn1&d;AhQAUwGJZ251>#?NlPVG?9;1j(Na@y(?eR~~yS-8~SAk|sGv-#Mp&e%~V?xB!EqDbo@ zv&R(w<5aaK*Hw2rGDHB2B1A|EJfTp(!%0^~{22~7ipWHd07sLFGm{XANi*gj#ZKF- z+nz}__1`|BNY8uj7JT1yneivQ~TBkz`V`A1}q&yC#YiSKcbj=}rbZ8^{FHU?4Z zXQ~+@rK}}6B|CEr=6K&{`vtQhEc`7nth`UC3RUu>WHX}6xpg$q3J-iOzYd@AV z?qTHD6@zI|wX``eoq{f-`M1B7Pw(A7 zKPSIPQoZ^1s}2=BLii@VaJoN-AMN62HD)pGqL_ld3I?(mt^xvIAyXv;D6U*UEA>b( z9(Ef#O=O8*)JO{v1oX)Z#@-Jbb4(m86{n&^pQDb;VFm%aOv^(%bKxGFADl$qJ2ehD zs3ECQz+mQ@dHd&fS2n{G=+qLUT?ha3b^o{AtJeW?etY{B3OibMHP*R$v;*pOCe9v< zrsq+ZuQMB+fD-1-azyj=*N)GFp0Cpukp0S35gos$49-5o-Uu9BAmN_492nR)c>wdH zDI2ysTK99#!-#|L%hz3^4z#;zqpi~HS?j;|`7(9Ru3DdS_|H(f>8ibyT^0{kFMOV& ziY8(yRQ#cN&gy_6v#?;U3`{#VGkTs)rRdrAb9z+&eJqBTMGgHItbh+>eO70vJ=Wa) zK1bIBqcVM`nCN0yr~_L5zee1&^u<8-%~Y#@NO+-S2H?g)nRrH-B)}F~>Fg zDO1P6&eU=-tVb85C97 zWGj5_DsFks_;omRKm0537|z5Fe%e&`Upbmrg6L> zD%18o_1e6P_kY>``Vf4Sh#Fj6Tlv&;L>MAj z0vk z&18rI^!--wAJ&&lxdHt=y8cnknXvip_wA6^Mh7{W4X0MwO9n1u=Ir7F_I%C*u4lhi z+?W1pjv&(Av7EJq?=Q6UobdAIKU6Tsd@Bgd(lZ%KNvR>j2 zgW@mYP&(iFg8uKQ#*_*?%xoprCIDui1eAM@0fX<^N$8K{UnW)$-fherj~wZ!?9jeZQ7I@!9?KWm9R&1_^+U$-nqT3B|VEAChf&m52cYf{;hCrI5gFAismXdMw>NzKZnykhwU~GrsZ5~ zip}Y$)!kR0D=%9mB7#FT784t|{{1LgSMHd0#t%NOOu}G@MJdI-~X<<567MM!k=B}V-ZM;Lt#}MnA%UO_8Ix!biZp7-xSTk zIE9gNa^XFCdsWtxeCMbQ_^QT&ky7FkWXF|GD@b*wf0KZ@_FG^-Rux~%h~9URieGk* zEm4Z4H(SC40T(BdN8DV_gNAm#<>Sy^?`z^gp+Q-RZ$$f#Ngmsh$jJb zQHySumnk*>$OqHNai2RZ%Og}_BMp|c_YlTSgN=%w;d!{QD4vD_$Y;d`k{dzA>|zJY zt5n-KhSbJ3+lVhsqi1KG8+cA9vfuO@E{rxz*75E*o;JIt^*fR)+id~BziVca#6(4? zCu>6Qqk)tdQ_ESjJwvfG_Kv=@;gts? 
znU1rR;5L(i6AO&(GrvjKcP=PVzc&2FE`>fU0sCY0Wk9wDk!>90 zMCkSCnGM%5FDg&r>4J4okaYopPYz@nAi2k&0TB2%0pR6**+#LLu(YcA%iy zVtLzpXl-8evH!_LuHv@xMsu0Z=a7ZGq9Si{+ArchJe@=uKvCLF(w##0`#V`JU%+XL zf!my1>x%v-I$jSD1&Z4X4+ql2=>46IaAsc7Umr4BR!||Dy zKxN~e=GCvrMwtl)cbP(%qY82uLcxP(DKO$t67^E29jAgd8Cw_Nq8spB&jRY)s91q* z33CH*c~oOpNKq@%RQFR9%0^Uc5r|uvQ$o!}8y%&=&M;=nA4RHM;8jUO(q2U(qCPLy zi0^gOv|c)?0-KmDXIJy5Xlk)v3*DNDNUEN>%0x?rLMEw(ZR6f;N;np%+zeD-tiS~b z)X}2=P^lAx0a|JR42dGMd4?n!%RMpW1mLT-isdqUX_9+OhLHk*eMY{qK)!6rdR8el z3~i)bKrG6Vvq?(6!>F3J0(Wue8<@E|*fb5QR+%lSXb_t;{Ypd?{cUh0fE^eip-gR~ zlx<2m^II4e9Cn8`D}uC8llGSe8!orF4Y~W+VM4_M+;7%-Vi~~Q0<|)NGRI3_9DLYHFL99Bli#kmKjD?R+c&xUQJilV9iO*cy+dn-pE;0DMuBhbit{1to-5z zTD`_?yFCUSj+s8BEbJ+)e<+K%6*1iW;Jha1s^nFQ7^m zmaR+jACm*@UYSg0b@s6&b8CDx>wN4oFVRBU)xkA6w_zN4+dP`WQeZ{7h-id$6}(zv z6RverVmXGqS~jqlUU|_BsHE;5UWnNS0^Z+L?+|H1y!8D2=# zGXxLdwpkbUEx0~sGeC8)?~Kc^;3mjQU>~OEAW7E$ciUF{SgZcSP4&HC&Ise*C!*(P z@_2%eVJa`%*d|cz{OkMcw-G+i1;M{+_de5;`_<{g_P;gz;o4J;N4Ef z6^`}WtcCP~$9z%-*cff((s1AP4#qbN9#^8(? z5JO6-{j!nK)Z&0dH2vR~p|t8&Z7ola_WM(x(#AeV_=NsrtE=_=c{e?tjoa}JDodGy z$2r^fGQ7JOv^aqF)cMBB&4rviZ3h)8hM!ct&G545e|<6+XibbV3MGt1}QvU>|3TGsbxCveUgj6lvp;&aFzVN(=-?Woe* zU%K;ZS4#ibW@|}IjBiVP$YDdylH=sZbu$ZxvRbj*{$!t;XChf4V@!tRRo67VaL+>j zJydxtr&$4A$I{t{t8z^DI*!IPHydAa-k3EoX*M~VDm$7m>Gb@3<%N_Y%6%64^pBz1 zlkxLCT(IlTV*4E=n?;Z=({^~?rjNpOOfZi6u)X^w+x4W7 zErckE8lncH4MOxS#%P0Wh+d)-EoSsKlBiKf8(owcM2i+J%GSH+H3*}(=s|GS-rslj zcg}U4UuXSX@4KG$mS;Wdxu3PJ`^Ftpd|AtIx!5K3Y!2}IwX)fTUP5c2x1U}GH=obC zq`Nbx@AG$|ZjDRr&#X$>2ER2mb!Ay!k(;_&YFu`{SZ_wVUakPYbWa*)U34+l4N^q1 z+*{NRf1u?yw%12b!O&={S~1Z-pz?LVbi>goAF&$Y-9t#8?I*}b5v(81$?@gi=!E7jVmaVXTe`N*Tw|fSY2+f*O ztb^tyu(8EB-l*4r_Hrr`;#?>kE8i|lLfciBTA|QhR<^{$@UWC{?jg(`u>j&ZeU=0V zHI+T$s6Z|K$;qMVE`DqN3)(Ned6ym^uiaFar&I7Cc%)p&>Op<9aHmAfWAG#=GW?Lr zI{Zbn;4X;jxks3CVMIolGNy*(8YPWC)X%z}<_ysId?+y@qFF~+NTB0W%*{)38cVn& z9j)$4BkT}k8dL@YD3JTKriM+f0rFu?$IoyXT|BhvO6VTv9krRYu-!Rq5%Ng7eIq&&1c zot+&!le%fg(z3F>y*=894)Z&jf0XDlMuq6ScBnQy=qxAY*x#fu=PI}CeIGyUv3ZqX zFUl}rBa#GO?@3TOL(;KR{|3F55^}P|kQ?~kt(o09K6Ymu04l7!i6d({K8?&7Tj~eb zfJk&*PTM%|1Gn$NIAciRooG_&JzeeYF$3h#;Aho;*qEP`P5jpQZnHyB#c6@>|6J`Z;U zu>$!6DM}L^5-Q??ll<4f3RN2dDhm79K;#=VU^-b>AsIBY;?e+0U%xY8r%hRhL@dIVQw(M&fDvDpqS~ro&0?`^t7TZO`N9yV_1)mNGH)FxvcOhd$T~ifsRg7K z-$Txhu!29hr*ajG76)ulRx>v0p>{uhs>C1B+KHtdFxW&&NK5l!`%;ivjixma8A-`F zX%@ASgzyFD8f!zikz+)M1-f2cW`V2Qr}T`bh44ru_zkgj;1n@C>0*a}3PWt?5?@72=Vta4wjAzd9;6(G=uDd<2RkqNa}bCc zqym-K(fI%`xH(`LU)FPqC8ele?JxG*6Y$?zEIys{-5oJ9+m*gPcA49f2#(KO{;4K8 z23I|uf`Cug|NJ%@KVEbFbMk6gD^$+wqukF%mM>e->#-t=gZ%k{+{F$3XN;K-Q@O2h z29zS@vNAF&EyjJvNk_xTA2aHb^-bxdw}fZB$PYUbKCWHz-w;R}qs*1bqf%DK=@#{n z5}Uvw2HZ5{+$=!gxTr4qqO5F|HWkh_EzTflG*F_$dw{?Oyt}Hc znRP0BJVW?6{_3+X!#DjnMc!&0Lgn+uUTB;t4uSc^Sra|Jb!<^yTT^4ZRh_76eNd^? zwXJrvQazBVm zHnT9~rm@267Cnf1-rFuGEjY%LOc4Mw^pEdBQ6^0cU9lo7dnuR+wOZ z{YBa)?fv$Xkptiv3H_Gtw4jpKid3t~pdMX|!qEFB1N>0y$2)@lmI+};*$jJ&$RNWK zt|3=%Y203j*bl0F?|xk28i$-+XTt_(p83Q_J<-^T=}-CQV;tTu7qf+NEjK4nTxB|K zBD<G=^C+n%b?Qzv_H$v(eW{;po@l zkf&sRS67$(ePa2ktz+p)OWgC4aQkC*pdPuRchJ|@wC9`YiS$|WHm#Yow3#$%-! 
diff --git a/config/compare_config_list.yml b/config/compare_config_list.yml
new file mode 100644
index 000000000..49acf91c2
--- /dev/null
+++ b/config/compare_config_list.yml
@@ -0,0 +1,10 @@
+# This YAML file controls which model runs are compared in utils/compare_run_configs.py.
+# Specify run_ids and their model directories below.
+# Use always_show_patterns to control which parameters are always shown in the output table.
+# Put '*' to show all parameters, or leave empty to only show changed parameters.
+# Use for example 'ae_global' to show all parameters starting with 'ae_global'.
+run_ids:
+  - [run_id1, /path/to/WeatherGenerator/models/]
+  - [run_id2, /alt/path/to/WeatherGenerator/models/]
+always_show_patterns:
+  - '*'
\ No newline at end of file
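A minimal sketch of consuming the file above, assuming patterns are matched as prefixes or fnmatch-style globs (the helper below is illustrative; the actual logic lives in utils/compare_run_configs.py):

    import fnmatch
    import yaml

    # Load the comparison config and build an "always show" predicate.
    with open("config/compare_config_list.yml") as f:
        cfg = yaml.safe_load(f)

    def always_shown(param_name: str) -> bool:
        # '*' shows every parameter; an empty list falls back to changed-only output.
        patterns = cfg.get("always_show_patterns") or []
        return any(param_name.startswith(p) or fnmatch.fnmatch(param_name, p) for p in patterns)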
diff --git a/config/default_config.yml b/config/default_config.yml
new file mode 100644
index 000000000..d7d66660f
--- /dev/null
+++ b/config/default_config.yml
@@ -0,0 +1,163 @@
+streams_directory: "./config/streams/era5_1deg/"
+
+embed_orientation: "channels"
+embed_local_coords: True
+embed_centroids_local_coords: False
+embed_size_centroids: 0
+embed_unembed_mode: "block"
+embed_dropout_rate: 0.1
+
+target_cell_local_prediction: True
+
+ae_local_dim_embed: 1024
+ae_local_num_blocks: 2
+ae_local_num_heads: 16
+ae_local_dropout_rate: 0.1
+ae_local_with_qk_lnorm: True
+
+ae_local_num_queries: 1
+ae_local_queries_per_cell: False
+ae_adapter_num_heads: 16
+ae_adapter_embed: 128
+ae_adapter_with_qk_lnorm: True
+ae_adapter_with_residual: True
+ae_adapter_dropout_rate: 0.1
+
+ae_global_dim_embed: 2048
+ae_global_num_blocks: 8
+ae_global_num_heads: 32
+ae_global_dropout_rate: 0.1
+ae_global_with_qk_lnorm: True
+# TODO: switching to < 1 triggers triton-related issues.
+# See https://github.com/ecmwf/WeatherGenerator/issues/1050
+ae_global_att_dense_rate: 1.0
+ae_global_block_factor: 64
+ae_global_mlp_hidden_factor: 2
+
+decoder_type: PerceiverIOCoordConditioning # CrossAttentionAdaNormConditioning
+pred_adapter_kv: False
+pred_self_attention: True
+pred_dyadic_dims: False
+pred_mlp_adaln: True
+
+# number of steps offset applied to first target window; if set to zero and forecast_steps=0 then
+# one is training an auto-encoder
+forecast_offset : 0
+forecast_delta_hrs: 0
+forecast_steps: 0
+forecast_policy: null
+forecast_att_dense_rate: 1.0
+fe_num_blocks: 0
+fe_num_heads: 16
+fe_dropout_rate: 0.1
+fe_with_qk_lnorm: True
+impute_latent_noise_std: 0.0 # 1e-4
+
+healpix_level: 5
+
+with_mixed_precision: True
+with_flash_attention: True
+compile_model: False
+with_fsdp: True
+attention_dtype: bf16
+mlp_norm_eps: 1e-5
+norm_eps: 1e-4
+
+latent_noise_kl_weight: 0.0 # 1e-5
+latent_noise_gamma: 2.0
+latent_noise_saturate_encodings: 5
+latent_noise_use_additive_noise: False
+latent_noise_deterministic_latents: True
+
+loss_fcts:
+  -
+    - "mse"
+    - 1.0
+loss_fcts_val:
+  -
+    - "mse"
+    - 1.0
+
+batch_size_per_gpu: 1
+batch_size_validation_per_gpu: 1
+
+# a regex that needs to fully match the name of the modules you want to freeze
+# e.g. ".*ERA5" will match any module whose name ends in ERA5
+# encoders and decoders that exist per stream have the stream name attached at the end
+freeze_modules: ""
+
+# whether to track the exponential moving average of weights for validation
+validate_with_ema: True
+ema_ramp_up_ratio: 0.09
+ema_halflife_in_thousands: 1e-3
+
+# training mode: "forecast" or "masking" (masked token modeling)
+# for "masking" to train with auto-encoder mode, forecast_offset should be 0
+training_mode: "masking"
+# masking rate when training mode is "masking"; ignored in forecast mode
+masking_rate: 0.6
+# sample the masking rate (with normal distribution centered at masking_rate)
+# note that a sampled masking rate leads to varying memory requirements
+masking_rate_sampling: True
+# sample a subset of all target points, useful e.g. to reduce memory requirements (also can specify per-stream)
+sampling_rate_target: 1.0
+# include a masking strategy here, currently only supporting "random", "block", "healpix", "channel", "causal" and "combination"
+masking_strategy: "random"
+# masking_strategy_config is a dictionary of additional parameters for the masking strategy
+# required for "healpix" and "channel" masking strategies
+# "healpix": requires the healpix mask level to be specified with `hl_mask`
+# "channel": requires "mode" to be specified, "per_cell" or "global"
+masking_strategy_config: {"strategies": ["random", "healpix", "channel"],
+                          "probabilities": [0.34, 0.33, 0.33],
+                          "hl_mask": 3, "mode": "per_cell",
+                          "same_strategy_per_batch": false
+                         }
+
+num_mini_epochs: 32
+samples_per_mini_epoch: 4096
+samples_per_validation: 512
+shuffle: True
+
+lr_scaling_policy: "sqrt"
+lr_start: 1e-6
+lr_max: 5e-5
+lr_final_decay: 1e-6
+lr_final: 0.0
+lr_steps_warmup: 512
+lr_steps_cooldown: 512
+lr_policy_warmup: "cosine"
+lr_policy_decay: "constant"
+lr_policy_cooldown: "linear"
+
+grad_clip: 1.0
+weight_decay: 0.1
+norm_type: "LayerNorm"
+nn_module: "te"
+log_grad_norms: False
+
+start_date: 197901010000
+end_date: 202012310000
+start_date_val: 202101010000
+end_date_val: 202201010000
+len_hrs: 6
+step_hrs: 6
+input_window_steps: 1
+
+val_initial: False
+
+loader_num_workers: 8
+log_validation: 0
+streams_output: ["ERA5"]
+
+istep: 0
+run_history: []
+
+desc: ""
+data_loader_rng_seed: ???
+run_id: ???
+
+# The period to log in the training loop (in number of batch steps)
+train_log_freq:
+  terminal: 10
+  metrics: 20
+  checkpoint: 250
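The freeze_modules regex above must fully match a module name, not merely occur somewhere in it. A minimal sketch of this semantics, assuming a PyTorch model (the stand-in model and pattern are illustrative):

    import re

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(8, 8))  # stand-in model
    pattern = re.compile(".*ERA5")
    for name, module in model.named_modules():
        if pattern.fullmatch(name):  # full match, not a substring search
            for p in module.parameters():
                p.requires_grad = False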
diff --git a/config/evaluate/config_zarr2cf.yaml b/config/evaluate/config_zarr2cf.yaml
new file mode 100644
index 000000000..cb111954a
--- /dev/null
+++ b/config/evaluate/config_zarr2cf.yaml
@@ -0,0 +1,143 @@
+# This file controls the conversion from Zarr format to CF-compliant NetCDF files in weathergen.evaluate.zarr_nc.zarr_to_netcdf.
+# It defines the mapping of variable and dimension names, their standard names, units, and level types.
+variables:
+  q:
+    var: q
+    long: specific_humidity_on_pressure_level
+    std: specific_humidity
+    wg_unit: kg kg**-1
+    std_unit: kg kg-1
+    level_type: pl
+  t:
+    var: t
+    long: temperature_on_pressure_levels
+    std: air_temperature
+    wg_unit: K
+    std_unit: K
+    level_type: pl
+  u:
+    var: u
+    long: u_wind_on_pressure_levels
+    std: x_wind
+    wg_unit: m s**-1
+    std_unit: m s-1
+    level_type: pl
+  v:
+    var: v
+    long: v_wind_on_pressure_levels
+    std: y_wind
+    wg_unit: m s**-1
+    std_unit: m s-1
+    level_type: pl
+  z:
+    var: z
+    long: geopotential_height_on_pressure_levels
+    std: geopotential_height
+    wg_unit: m**2 s**-2
+    std_unit: m
+    level_type: pl
+  10u:
+    var: u10
+    long: u_wind_at_10m
+    std: x_wind
+    wg_unit: m s**-1
+    std_unit: m s-1
+    level_type: sfc
+  10v:
+    var: v10
+    long: v_wind_at_10m
+    std: y_wind
+    wg_unit: m s**-1
+    std_unit: m s-1
+    level_type: sfc
+  2d:
+    var: d2m
+    long: 2m_dewpoint_temperature
+    std: dew_point_temperature
+    wg_unit: K
+    std_unit: K
+    level_type: sfc
+  2t:
+    var: t2m
+    long: 2m_temperature
+    std: air_temperature
+    # near-surface (usually, 2 meter): https://pcmdi.llnl.gov/mips/cmip3/variableList.html
+    wg_unit: K
+    std_unit: K
+    level_type: sfc
+  msl:
+    var: msl
+    long: mean_sea_level_pressure
+    std: air_pressure_at_mean_sea_level
+    wg_unit: Pa
+    std_unit: Pa
+    level_type: sfc
+  skt:
+    var: skt
+    # The standard name for skin temperature is sea_surface_skin_temperature, which is defined as "The temperature of the very thin layer at the surface of the sea that is in contact with the atmosphere".
+    # It is strongly recommended that a variable with this standard name should have a units_metadata attribute, with one of the values "on-scale" or "difference", whichever is appropriate for the data,
+    # because it is essential to know whether the temperature is on-scale (meaning relative to the origin of the scale indicated by the units) or refers to temperature differences (implying that the origin of the temperature scale is irrelevant)
+    long: skin_temperature
+    std: sea_surface_skin_temperature
+    wg_unit: K
+    std_unit: K
+    level_type: sfc
+  sp:
+    var: sp
+    long: surface_pressure
+    std: surface_air_pressure
+    wg_unit: Pa
+    std_unit: Pa
+    level_type: sfc
+
+
+coordinates:
+  sfc:
+    valid_time: valid_time
+    lat: latitude
+    lon: longitude
+    stream: stream
+    forecast_step: forecast_period
+    forecast_ref_time: forecast_ref_time
+    ncells: ncells
+  pl:
+    pressure_level: pressure
+    valid_time: valid_time
+    lat: latitude
+    lon: longitude
+    stream: stream
+    forecast_step: forecast_period
+    forecast_ref_time: forecast_ref_time
+    ncells: ncells
+
+dimensions:
+  valid_time:
+    wg: valid_time
+    std: time
+  lat:
+    wg: latitude
+    std: latitude
+    std_unit: degrees_north
+  lon:
+    wg: longitude
+    std: longitude
+    std_unit: degrees_east
+  pressure_level:
+    wg: pressure
+    std: air_pressure
+    std_unit: hPa
+  forecast_ref_time:
+    wg: forecast_ref_time
+    std: forecast_ref_time
+  forecast_step:
+    wg: forecast_period
+    std: forecast_period
+  stream:
+    wg: stream
+    std: stream
+  ncells:
+    wg: ncells
+    std: ncells
+  # TODO maybe absorb stream as an attribute
+  # forecast_step: forecast_step
+  # don't want forecast_step anyway
+
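A minimal sketch of applying the mapping above, assuming xarray and PyYAML are available (the Zarr path is illustrative; the actual conversion is implemented in weathergen.evaluate.zarr_nc.zarr_to_netcdf):

    import xarray as xr
    import yaml

    with open("config/evaluate/config_zarr2cf.yaml") as f:
        cfg = yaml.safe_load(f)

    ds = xr.open_zarr("results/forecast.zarr")  # illustrative path
    # Rename WeatherGenerator variable names to their CF-style counterparts
    # and attach standard_name/units attributes from the mapping.
    renames = {wg: spec["var"] for wg, spec in cfg["variables"].items() if wg in ds}
    ds = ds.rename(renames)
    for spec in cfg["variables"].values():
        if spec["var"] in ds:
            ds[spec["var"]].attrs.update(standard_name=spec["std"], units=spec["std_unit"])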
diff --git a/config/evaluate/eval_config.yml b/config/evaluate/eval_config.yml
new file mode 100644
index 000000000..85157728d
--- /dev/null
+++ b/config/evaluate/eval_config.yml
@@ -0,0 +1,80 @@
+# optional: if commented out, all is taken care of by the default settings
+# NB. global options apply to all run_ids
+#global_plotting_options:
+#  image_format : "png" # options: "png", "pdf", "svg", "eps", "jpg" ..
+#  dpi_val : 300
+#  ERA5:
+#    marker_size: 2
+#    scale_marker_size: 1
+#    marker: "o"
+#    # alpha: 0.5
+#    2t:
+#      vmin: 250
+#      vmax: 300
+#    10u:
+#      vmin: -40
+#      vmax: 40
+
+evaluation:
+  metrics : ["rmse", "mae"]
+  regions: ["global", "nhem"]
+  summary_plots : true
+  summary_dir: "./plots/"
+  plot_ensemble: "members" # supported: false, "std", "minmax", "members"
+  plot_score_maps: false # plot scores on 2D maps; this slows down score computation
+  print_summary: false # print score values on screen; this can be verbose
+  log_scale: false
+  add_grid: false
+  score_cards: false
+
+run_ids :
+  ar40mckx:
+    label: "pretrained model ar40mckx"
+    results_base_dir : "./results/"
+    mini_epoch: 0
+    rank: 0
+    streams:
+      ERA5:
+        channels: ["2t", "10u"] #, "10v", "z_500", "t_850", "u_850", "v_850", "q_850", ]
+        evaluation:
+          forecast_step: "all"
+          sample: "all"
+          ensemble: "all" # supported: "all", "mean", [0, 1, 2]
+        plotting:
+          sample: [1, 3]
+          forecast_step: [1, 3, 2]
+          ensemble: [0, 2, 5] # supported: "all", "mean", [0, 1, 2]
+          plot_maps: true
+          plot_histograms: true
+          plot_animations: true
+      CERRA:
+        channels: ["z_500", "t_850", "u_850"] #, "blah"]
+        evaluation:
+          forecast_step: "all"
+          sample: "all"
+        plotting:
+          sample: [2, 3, 0]
+          forecast_step: [1, 3, 4, 5]
+          plot_maps: true
+          plot_histograms: true
+          plot_animations: true
+
+  c8g5katp:
+    label: "2-step window"
+    results_base_dir : "./results/"
+    mini_epoch: 0
+    rank: 0
+    streams:
+      ERA5:
+        channels: ["2t", "10u", "10v"] #, "z_500", "t_850", "u_850", "v_850", "q_850", ]
+        evaluation:
+          forecast_step: "all"
+          sample: "all"
+          ensemble: "mean"
+        plotting:
+          sample: [1, 3]
+          forecast_step: [1, 3, 2]
+          ensemble: "mean"
+          plot_maps: true
+          plot_histograms: true
+          plot_animations: true
\ No newline at end of file
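The nested run_ids/streams layout above can be loaded with OmegaConf, mirroring how the integration test later in this patch builds the same structure programmatically; a minimal sketch (path illustrative):

    from omegaconf import OmegaConf

    cfg = OmegaConf.load("config/evaluate/eval_config.yml")
    for run_id, run_cfg in cfg.run_ids.items():
        # Each run lists per-stream channels plus evaluation/plotting options.
        for stream, stream_cfg in run_cfg.streams.items():
            print(run_id, stream, list(stream_cfg.channels))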
diff --git a/config/ifs_fesom_config.yml b/config/ifs_fesom_config.yml
new file mode 100644
index 000000000..4167bf91d
--- /dev/null
+++ b/config/ifs_fesom_config.yml
@@ -0,0 +1,20 @@
+streams_directory: "./config/streams/fesom/"
+
+ae_local_num_queries: 2
+ae_adapter_embed: 128
+ae_local_dim_embed: 1024
+ae_local_num_blocks: 4
+ae_local_num_heads: 16
+
+start_date: 2000-10-10T00:00
+end_date: 2199-12-31T00:00
+start_date_val: 2200-01-01T00:00
+end_date_val: 2209-12-31T00:00
+
+num_epochs: 111
+samples_per_epoch: 64
+samples_per_validation: 16
+shuffle: True
+loader_num_workers: 4
+
+lr_max: 2e-5
diff --git a/config/mixed.yml b/config/mixed.yml
new file mode 100644
index 000000000..7c8092edf
--- /dev/null
+++ b/config/mixed.yml
@@ -0,0 +1,2 @@
+# The default configuration file for multi-stream training.
+streams_directory: "./config/streams/streams_mixed/" \ No newline at end of file diff --git a/config/profiling/annotations.json b/config/profiling/annotations.json new file mode 100644 index 000000000..ea730997b --- /dev/null +++ b/config/profiling/annotations.json @@ -0,0 +1,793 @@ +[ + { + "domain": "WeatherGen", + "color": "A1F702", + "module": "weathergen.datasets.multi_stream_data_sampler", + "functions": [ + "MultiStreamDataSampler.__iter__" + ] + }, + { + "domain": "WeatherGen", + "color": "A1F702", + "module": "weathergen.datasets.tokenizer_forecast", + "functions": [ + "encode_times_source", + "encode_times_target", + "tokenize_window_space", + "tokenize_window_spacetime", + "TokenizerForecast.batchify_source", + "TokenizerForecast.batchify_target", + "TokenizerForecast.reset" + ] + }, + { + "domain": "WeatherGen", + "color": "A1F702", + "module": "weathergen.datasets.tokenizer_masking", + "functions": [ + "encode_times_source", + "encode_times_target", + "tokenize_window_space", + "tokenize_window_spacetime", + "TokenizerMasking.batchify_source", + "TokenizerMasking.batchify_target", + "TokenizerMasking.reset" + ] + }, + { + "domain": "WeatherGen", + "color": "A1F702", + "module": "weathergen.datasets.batchifyer", + "functions": [ + "encode_times_source", + "encode_times_target", + "tokenize_window_space", + "tokenize_window_spacetime", + "Batchifyer.batchify_source", + "Batchifyer.batchify_target" + ] + }, + { + "domain": "WeatherGen", + "color": "FAE289", + "module": "weathergen.datasets.stream_data", + "functions": [ + "StreamData.__init__", + "StreamData.to_device", + "StreamData.add_empty_source", + "StreamData.add_empty_target", + "StreamData.add_source", + "StreamData.add_target", + "StreamData.target_empty", + "StreamData.source_empty", + "StreamData.empty", + "StreamData._merge_cells", + "StreamData.merge_inputs" + ] + }, + { + "domain": "WeatherGen", + "color": "C6BAFF", + "module": "weathergen.train.trainer", + "functions": [ + "Trainer.inference", + "Trainer.train", + "Trainer.validate", + "Trainer.compute_loss", + "Trainer.run" + ] + + }, + { + "domain": "WeatherGen", + "color": "A1F702", + "module": "weathergen.datasets.utils", + "functions": [ + "s2tor3", + "r3tos2", + "get_target_coords_local_ffast", + "healpix_verts_rots", + "locs_to_cell_coords_ctrs" + ] + }, + { + "domain": "WeatherGen", + "color": "69a201", + "module": "weathergen.datasets.tokenizer_utils", + "functions": [ + "arc_alpha", + "encode_times_source", + "encode_times_target", + "hpy_cell_splits", + "hpy_splits", + "tokenize_window_space", + "tokenize_window_spacetime", + "_coords_local" + ] + }, + { + "domain": "WeatherGen", + "color": "0264F7", + "module": "weathergen.model.model", + "functions": [ + "ModelParams.__init__", + "ModelParams.create", + "Model.__init__", + "Model.create", + "Model.freeze_weights_forecast", + "Model.print_num_parameters", + "Model.load", + "Model.forward_jac", + "Model.forward", + "Model.embed_cells", + "Model.source_tokens", + "Model.assimilate_local", + "Model.assimilate_global", + "Model.forecast", + "Model.predict" + ] + }, + { + "domain": "WeatherGen", + "color": "#C27BA0", + "module": "weathergen.model.attention", + "functions": [ + "MultiSelfAttentionHeadVarlen.__init__", + "MultiSelfAttentionHeadVarlen.forward", + "MultiSelfAttentionHeadVarlenFlex.__init__", + "MultiSelfAttentionHeadVarlenFlex.forward", + "MultiSelfAttentionHeadLocal.__init__", + "MultiSelfAttentionHeadLocal.forward", + "MultiCrossAttentionHeadVarlen.__init__", + 
"MultiCrossAttentionHeadVarlen.forward", + "MultiCrossAttentionHeadVarlenSlicedQ.__init__", + "MultiCrossAttentionHeadVarlenSlicedQ.forward", + "MultiSelfAttentionHead.__init__", + "MultiSelfAttentionHead.forward", + "MultiCrossAttentionHead.__init__", + "MultiCrossAttentionHead.forward" + ] + }, + { + "domain": "WeatherGen", + "color": "#83F5BF", + "module": "weathergen.model.layers", + "functions": [ + "NamedLinear.forward", + "MLP.__init__", + "MLP.forward" + ] + }, + { + "domain": "WeatherGen", + "color": "#50CDF3", + "module": "weathergen.model.norms", + "functions": [ + "RMSNorm.forward", + "RMSNorm.__init__", + "RMSNorm._norm", + "AdaLayerNorm.forward", + "AdaLayerNorm.__init__", + "SwiGLU.forward", + "SwiGLU.__init__", + "modulate", + "AdaLayerNormLayer.forward", + "AdaLayerNormLayer.__init__", + "AdaLayerNormLayer.initialise_weights", + "SaturateEncodings.forward", + "SaturateEncodings.__init__" + ] + }, + { + "domain": "WeatherGen", + "color": "02dff7", + "module": "weathergen.model.engines", + "functions": [ + "EmbeddingEngine.__init__", + "EmbeddingEngine.create", + "LocalAssimilationEngine.__init__", + "LocalAssimilationEngine.create", + "Local2GlobalAssimilationEngine.__init__", + "Local2GlobalAssimilationEngine.create", + "GlobalAssimilationEngine.__init__", + "GlobalAssimilationEngine.create", + "ForecastingEngine.__init__", + "ForecastingEngine.create", + "EnsPredictionHead.__init__", + "EnsPredictionHead.create", + "EnsPredictionHead.forward", + "TargetPredictionEngineClassic.__init__", + "TargetPredictionEngineClassic.forward", + "TargetPredictionEngine.__init__", + "TargetPredictionEngine.forward" + ] + }, + { + "domain": "WeatherGen", + "color": "02dff7", + "module": "weathergen.model.embeddings", + "functions": [ + "StreamEmbedTransformer.__init__", + "StreamEmbedTransformer.forward_channels", + "StreamEmbedTransformer.forward_columns", + "StreamEmbedLinear.__init__", + "StreamEmbedLinear.forward" + + ] + }, + { + "domain": "flash_attention", + "color": "ffff00", + "module": "flash_attn", + "functions": [ + "flash_attn_func", + "flash_attn_varlen_func" + + ] + }, + { + "domain": "PyTorch_flash_attention", + "color": "808000", + "module": "torch.nn.attention.flex_attention", + "functions": [ + "create_block_mask", + "flex_attention" + ] + }, + { + "domain": "WeatherGen", + "color": "808000", + "module": "weathergen.model.positional_encoding", + "functions": [ + "positional_encoding_harmonic", + "positional_encoding_harmonic_idx", + "positional_encoding_harmonic_global", + "positional_encoding_harmonic_coord" + ] + }, + { + "domain": "WeatherGen", + "color": "C6BAFF", + "module": "weathergen.train.trainer_base", + "functions": [ + "Trainer_Base.ddp_average" + ] + + }, + { + "domain": "healpy", + "color": "6CE6B8", + "module": "astropy_healpix.healpy", + "functions": [ + "ang2pix" + ] + }, + { + "domain": "Anemoi", + "color": "A9EBA7", + "module": "anemoi.datasets.data.store", + "functions": [ + "Zarr.__getitem__" + ] + }, + { + "domain": "Anemoi", + "color": "A9EBA7", + "module": "anemoi.datasets.data.subset", + "functions": [ + "Subset.__getitem__" + ] + }, + { + "domain": "Zarr", + "color": "52ED4C", + "module": "zarr.core", + "functions": [ + "Array._get_selection", + "Array._chunk_getitems", + "Array._chunk_key", + "Array.__process_chunk", + "Array.__getitem__", + "Array.__setitem__", + "Array.__delitem__", + "Array.__contains__", + "Array.__len__", + "Array.__iter__", + "Array.__reversed__", + "Array.__copy__", + "Array.__deepcopy__", + "Array.__repr__", + 
"Array.__str__", + "Array.__eq__", + "Array.__ne__", + "Array.__lt__", + "Array.__le__", + "Array.__gt__", + "Array.__ge__", + "Array.__hash__", + "Array.__bool__", + "Array.__array__", + "Array.__array_ufunc__", + "Array.__array_function__", + "Array.__array_priority__", + "Array.__array_wrap__", + "Array.__array_prepare__", + "Array.__array_struct__", + "Array.__array_interface__", + "Array.__array_finalize__" + ] + }, + { + "domain": "PyTorch", + "color": "E8B795", + "module": "torch.amp", + "functions": [ + "GradScaler.scale", + "GradScaler.unscale", + "GradScaler.unscale_", + "GradScaler.step", + "GradScaler.update" + ] + }, + { + "domain": "PyTorch", + "color": "E85A5A", + "module": "torch.distributed", + "functions": [ + "all_reduce" + ] + }, + { + "domain": "PyTorch", + "color": "E85A5A", + "module": "torch", + "functions": [ + "Tensor.to", + "argsort", + "cat", + "matmul", + "full", + "split", + "tensor" + ] + }, + { + "domain": "PyTorch", + "color": "A6F7E4", + "module": "torch.utils.data.dataloader", + "functions": [ + "_BaseDataLoaderIter.__next__", + "_SingleProcessDataLoaderIter.__next__", + "_MultiProcessingDataLoaderIter.__next__" + ] + }, + { + "domain": "PyTorch", + "color": "A6F7E4", + "module": "torch.utils.data.dataset", + "functions": [ + "Dataset.__getitem__", + "IterableDataset.__iter__" + ] + }, + + { + "domain": "PyTorch", + "color": "E89C5A", + "module": "torch.autograd", + "functions": ["backward"] + }, + + { + "domain": "PyTorch", + "color": "E85A5A", + "module": "torch.nn.functional", + "functions": [ + "conv1d", + "conv2d", + "conv3d", + "conv_transpose1d", + "conv_transpose2d", + "conv_transpose3d", + "conv_tbc", + "avg_pool1d", + "avg_pool2d", + "avg_pool3d", + "fractional_max_pool2d_with_indices", + "fractional_max_pool2d", + "fractional_max_pool3d_with_indices", + "fractional_max_pool3d", + "max_pool1d_with_indices", + "max_pool1d", + "max_pool2d_with_indices", + "max_pool2d", + "max_pool3d_with_indices", + "max_pool3d", + "max_unpool1d", + "max_unpool2d", + "max_unpool3d", + "lp_pool3d", + "lp_pool2d", + "lp_pool1d", + "adaptive_max_pool1d_with_indices", + "adaptive_max_pool1d", + "adaptive_max_pool2d_with_indices", + "adaptive_max_pool2d", + "adaptive_max_pool3d_with_indices", + "adaptive_max_pool3d", + "adaptive_avg_pool1d", + "adaptive_avg_pool2d", + "adaptive_avg_pool3d", + "dropout", + "alpha_dropout", + "dropout1d", + "dropout2d", + "dropout3d", + "feature_alpha_dropout", + "threshold", + "relu", + "glu", + "hardtanh", + "relu6", + "elu", + "selu", + "celu", + "leaky_relu", + "prelu", + "rrelu", + "logsigmoid", + "hardshrink", + "tanhshrink", + "softsign", + "softplus", + "softmin", + "softmax", + "gumbel_softmax", + "log_softmax", + "softshrink", + "tanh", + "sigmoid", + "hardsigmoid", + "linear", + "bilinear", + "silu", + "mish", + "hardswish", + "embedding", + "embedding_bag", + "batch_norm", + "instance_norm", + "layer_norm", + "rms_norm", + "group_norm", + "local_response_norm", + "ctc_loss", + "nll_loss", + "poisson_nll_loss", + "gaussian_nll_loss", + "kl_div", + "cross_entropy", + "binary_cross_entropy", + "binary_cross_entropy_with_logits", + "smooth_l1_loss", + "huber_loss", + "l1_loss", + "mse_loss", + "margin_ranking_loss", + "hinge_embedding_loss", + "multilabel_margin_loss", + "soft_margin_loss", + "multilabel_soft_margin_loss", + "cosine_embedding_loss", + "multi_margin_loss", + "pixel_shuffle", + "pixel_unshuffle", + "channel_shuffle", + "native_channel_shuffle", + "upsample", + "interpolate", + "upsample_nearest", + 
"upsample_bilinear", + "grid_sample", + "affine_grid", + "pad", + "pairwise_distance", + "pdist", + "cosine_similarity", + "one_hot", + "triplet_margin_loss", + "triplet_margin_with_distance_loss", + "normalize", + "unfold", + "fold", + "scaled_dot_product_attention", + "multi_head_attention_forward" + ] + }, + + { + "domain": "PyTorch", + "color": "E85A5A", + "module": "torch.optim", + "functions": [ + "Adafactor.step", + "Adadelta.step", + "Adagrad.step", + "Adam.step", + "Adamax.step", + "AdamW.step", + "ASGD.step", + "LBFGS.step", + "NAdam.step", + "Optimizer.step", + "RAdam.step", + "RMSprop.step", + "Rprop.step", + "SGD.step", + "SparseAdam.step" + ] + }, + + { + "domain":"PyTorch", + "color": "E85A5A", + "module": "torch.nn.modules", + "functions": [ + "Module.__call__", + "Bilinear.forward", + "Identity.forward", + "LazyLinear.forward", + "Linear.forward", + "CELU.forward", + "ELU.forward", + "GELU.forward", + "GLU.forward", + "Hardshrink.forward", + "Hardsigmoid.forward", + "Hardswish.forward", + "Hardtanh.forward", + "LeakyReLU.forward", + "LogSigmoid.forward", + "LogSoftmax.forward", + "Mish.forward", + "MultiheadAttention.forward", + "PReLU.forward", + "ReLU.forward", + "ReLU6.forward", + "RReLU.forward", + "SELU.forward", + "Sigmoid.forward", + "SiLU.forward", + "Softmax.forward", + "Softmax2d.forward", + "Softmin.forward", + "Softplus.forward", + "Softshrink.forward", + "Softsign.forward", + "Tanh.forward", + "Tanhshrink.forward", + "Threshold.forward", + "AdaptiveLogSoftmaxWithLoss.forward", + "BatchNorm1d.forward", + "BatchNorm2d.forward", + "BatchNorm3d.forward", + "LazyBatchNorm1d.forward", + "LazyBatchNorm2d.forward", + "LazyBatchNorm3d.forward", + "SyncBatchNorm.forward", + "ChannelShuffle.forward", + "ModuleDict.forward", + "ModuleList.forward", + "ParameterDict.forward", + "ParameterList.forward", + "Sequential.forward", + "Conv1d.forward", + "Conv2d.forward", + "Conv3d.forward", + "ConvTranspose1d.forward", + "ConvTranspose2d.forward", + "ConvTranspose3d.forward", + "LazyConv1d.forward", + "LazyConv2d.forward", + "LazyConv3d.forward", + "LazyConvTranspose1d.forward", + "LazyConvTranspose2d.forward", + "LazyConvTranspose3d.forward", + "CosineSimilarity.forward", + "PairwiseDistance", + "AlphaDropout.forward", + "Dropout.forward", + "Dropout1d.forward", + "Dropout2d.forward", + "Dropout3d.forward", + "FeatureAlphaDropout.forward", + "Flatten.forward", + "Unflatten.forward", + "Fold.forward", + "Unfold.forward", + "InstanceNorm1d.forward", + "InstanceNorm2d.forward", + "InstanceNorm3d.forward", + "LazyInstanceNorm1d.forward", + "LazyInstanceNorm2d.forward", + "LazyInstanceNorm3d.forward", + "BCELoss.forward", + "BCEWithLogitsLoss.forward", + "CosineEmbeddingLoss.forward", + "CrossEntropyLoss.forward", + "CTCLoss.forward", + "GaussianNLLLoss.forward", + "HingeEmbeddingLoss.forward", + "HuberLoss.forward", + "KLDivLoss.forward", + "L1Loss.forward", + "MarginRankingLoss.forward", + "MSELoss.forward", + "MultiLabelMarginLoss.forward", + "MultiLabelSoftMarginLoss.forward", + "MultiMarginLoss.forward", + "NLLLoss.forward", + "PoissonNLLLoss.forward", + "SmoothL1Loss.forward", + "SoftMarginLoss.forward", + "TripletMarginLoss.forward", + "TripletMarginWithDistanceLoss.forward", + "CrossMapLRN2d.forward", + "GroupNorm.forward", + "LayerNorm.forward", + "LocalResponseNorm.forward", + "RMSNorm.forward", + "CircularPad1d.forward", + "CircularPad2d.forward", + "CircularPad3d.forward", + "ConstantPad1d.forward", + "ConstantPad2d.forward", + "ConstantPad3d.forward", + 
"ReflectionPad1d.forward", + "ReflectionPad2d.forward", + "ReflectionPad3d.forward", + "ReplicationPad1d.forward", + "ReplicationPad2d.forward", + "ReplicationPad3d.forward", + "ZeroPad1d.forward", + "ZeroPad2d.forward", + "ZeroPad3d.forward", + "PixelShuffle.forward", + "PixelUnshuffle.forward", + "AdaptiveAvgPool1d.forward", + "AdaptiveAvgPool2d.forward", + "AdaptiveAvgPool3d.forward", + "AdaptiveMaxPool1d.forward", + "AdaptiveMaxPool2d.forward", + "AdaptiveMaxPool3d.forward", + "AvgPool1d.forward", + "AvgPool2d.forward", + "AvgPool3d.forward", + "FractionalMaxPool2d.forward", + "FractionalMaxPool3d.forward", + "LPPool1d.forward", + "LPPool2d.forward", + "LPPool3d.forward", + "MaxPool1d.forward", + "MaxPool2d.forward", + "MaxPool3d.forward", + "MaxUnpool1d.forward", + "MaxUnpool2d.forward", + "MaxUnpool3d.forward", + "GRU.forward", + "GRUCell.forward", + "LSTM.forward", + "LSTMCell.forward", + "RNN.forward", + "RNNBase.forward", + "RNNCell.forward", + "RNNCellBase.forward", + "Embedding.forward", + "EmbeddingBag.forward", + "Transformer.forward", + "TransformerDecoder.forward", + "TransformerDecoderLayer.forward", + "TransformerEncoder.forward", + "TransformerEncoderLayer.forward", + "Upsample.forward", + "UpsamplingBilinear2d.forward", + "UpsamplingNearest2d.forward" + ] + }, + + { + "domain":"Numpy", + "color": "89FAF6", + "module": "numpy", + "functions": [ + "array", + "argsort", + "argmax", + "argmin", + "amax", + "amin", + "all", + "any", + "average", + "bincount", + "clip", + "corrcoef", + "cov", + "split", + "flatnonzero", + "flatten", + "flip", + "fliplr", + "flipud", + "full", + "full_like", + "identity", + "imag", + "inner", + "insert", + "interp", + "intersect1d", + "isclose", + "iscomplex", + "concatenate", + "asarray", + "asanyarray", + "ascontiguousarray", + "asfortranarray", + "empty", + "empty_like", + "zeros", + "zeros_like", + "ones", + "ones_like", + "eye", + "arange", + "linspace", + "logspace", + "geomspace", + "meshgrid", + "mgrid", + "ogrid", + "diag", + "diagflat", + "tri", + "tril", + "triu", + "vander", + "histogram", + "histogram2d", + "histogramdd", + "digitize", + "correlate", + "convolve", + "vdot", + "dot", + "outer", + "tensordot", + "einsum", + "linalg.norm", + "linalg.cond", + "linalg.det", + "linalg.matrix_rank", + "linalg.slogdet", + "linalg.solve", + "linalg.tensorsolve", + "linalg.lstsq", + "linalg.inv", + "linalg.pinv", + "linalg.tensorinv", + "linalg.qr", + "linalg.svd", + "linalg.eig", + "linalg.eigh", + "linalg.eigvals", + "linalg.eigvalsh" + ] + }, + { + "domain":"Pandas", + "module": "pandas", + "functions": ["to_datetime"] + } +] \ No newline at end of file diff --git a/config/streams/cerra_seviri/cerra.yml b/config/streams/cerra_seviri/cerra.yml new file mode 100644 index 000000000..ea6d17e1d --- /dev/null +++ b/config/streams/cerra_seviri/cerra.yml @@ -0,0 +1,32 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +CERRA : + type : anemoi + filenames : ['cerra-rr-an-oper-se-al-ec-mars-5p5km-1985-2023-3h-v2.zarr'] + loss_weight : 1. 
+ masking_rate : 0.6 + masking_rate_none : 0.05 + token_size : 512 + embed : + net : transformer + num_tokens : 1 + num_heads : 2 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' + num_layers : 2 + num_heads : 4 + pred_head : + ens_size : 1 + num_layers : 1 diff --git a/config/streams/cerra_seviri/seviri.yml b/config/streams/cerra_seviri/seviri.yml new file mode 100644 index 000000000..4dabf68cd --- /dev/null +++ b/config/streams/cerra_seviri/seviri.yml @@ -0,0 +1,32 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +SEVIRI : + type : obs + filenames : ['observations-file-2018-2019-seviri-h512-v1.zarr'] + loss_weight : 1. + # masking_rate_none : 0.05 + token_size : 1024 + # max_num_targets: 8192 + embed : + net : transformer + num_tokens : 1 + num_heads : 2 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' + num_layers : 2 + num_heads : 8 + pred_head : + ens_size : 1 + num_layers : 1 \ No newline at end of file diff --git a/config/streams/era5_1deg/era5.yml b/config/streams/era5_1deg/era5.yml new file mode 100644 index 000000000..bb2234c4e --- /dev/null +++ b/config/streams/era5_1deg/era5.yml @@ -0,0 +1,37 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +ERA5 : + type : anemoi + filenames : ['aifs-ea-an-oper-0001-mars-o96-1979-2023-6h-v8.zarr'] + source_exclude : ['w_', 'skt', 'tcw', 'cp', 'tp'] + target_exclude : ['w_', 'slor', 'sdor', 'tcw', 'cp', 'tp'] + loss_weight : 1. + masking_rate : 0.6 + masking_rate_none : 0.05 + token_size : 8 + tokenize_spacetime : True + max_num_targets: -1 + embed : + net : transformer + num_tokens : 1 + num_heads : 8 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' # token or obs_value + num_layers : 2 + num_heads : 4 + # sampling_rate : 0.2 + pred_head : + ens_size : 1 + num_layers : 1 diff --git a/config/streams/era5_nppatms_synop/era5.yml b/config/streams/era5_nppatms_synop/era5.yml new file mode 100644 index 000000000..c51eb6e33 --- /dev/null +++ b/config/streams/era5_nppatms_synop/era5.yml @@ -0,0 +1,38 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +ERA5 : + type : anemoi + filenames : ['aifs-ea-an-oper-0001-mars-o96-1979-2023-6h-v8.zarr'] + loss_weight : 1. 
+  source_exclude : ['w_', 'skt', 'sp', 'tcw', 'cp', 'tp']
+  target_exclude : ['w_', 'skt', 'sp', 'tcw', 'cp', 'tp']
+  diagnostic : False
+  masking_rate : 0.6
+  masking_rate_none : 0.05
+  token_size : 32
+  tokenize_spacetime : True
+  max_num_targets: -1
+  embed :
+    net : transformer
+    num_tokens : 1
+    num_heads : 4
+    dim_embed : 128
+    num_blocks : 2
+  embed_target_coords :
+    net : linear
+    dim_embed : 128
+  target_readout :
+    type : 'obs_value'
+    num_layers : 2
+    num_heads : 4
+  pred_head :
+    ens_size : 1
+    num_layers : 1
+
diff --git a/config/streams/era5_nppatms_synop/npp_atms.yml b/config/streams/era5_nppatms_synop/npp_atms.yml
new file mode 100644
index 000000000..583c1b4b2
--- /dev/null
+++ b/config/streams/era5_nppatms_synop/npp_atms.yml
@@ -0,0 +1,31 @@
+# (C) Copyright 2024 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+NPPATMS :
+  type : obs
+  filenames : ['observations-ea-ofb-0001-2012-2023-npp-atms-radiances-v2.zarr']
+  loss_weight : 1.0
+  token_size : 32
+  max_num_targets: -1
+  embed :
+    net : transformer
+    num_tokens : 1
+    num_heads : 2
+    dim_embed : 128
+    num_blocks : 2
+  embed_target_coords :
+    net : linear
+    dim_embed : 128
+  target_readout :
+    type : 'obs_value'
+    num_layers : 1
+    num_heads : 4
+  pred_head :
+    ens_size : 1
+    num_layers : 1
diff --git a/config/streams/era5_nppatms_synop/synop.yml b/config/streams/era5_nppatms_synop/synop.yml
new file mode 100644
index 000000000..97a575019
--- /dev/null
+++ b/config/streams/era5_nppatms_synop/synop.yml
@@ -0,0 +1,30 @@
+# obs_types
+# 0 : polar orbiting satellites
+# 1 : geostationary satellites
+# 2 : conventional observations
+
+SurfaceCombined :
+  type : obs
+  filenames : ['observations-ea-ofb-0001-1979-2023-combined-surface-v2.zarr']
+  loss_weight : 1.0
+  masking_rate : 0.6
+  masking_rate_none : 0.05
+  token_size : 64
+  tokenize_spacetime : True
+  max_num_targets: -1
+  embed :
+    net : transformer
+    num_tokens : 1
+    num_heads : 2
+    dim_embed : 256
+    num_blocks : 2
+  embed_target_coords :
+    net : linear
+    dim_embed : 256
+  target_readout :
+    type : 'obs_value' # token or obs_value
+    num_layers : 2
+    num_heads : 4
+  pred_head :
+    ens_size : 1
+    num_layers : 1
diff --git a/config/streams/fesom/fesom.yml b/config/streams/fesom/fesom.yml
new file mode 100644
index 000000000..789011e2d
--- /dev/null
+++ b/config/streams/fesom/fesom.yml
@@ -0,0 +1,36 @@
+# (C) Copyright 2024 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+FESOM_NODE :
+  type : fesom
+  filenames : ['ocean_node']
+  target_file: "/work/ab0995/a270088/Kacper/weathergenertor/AWICM3/ocean_elem"
+  loss_weight : 1.
+ source : null + target : null + masking_rate : 0.6 + masking_rate_none : 0.05 + token_size : 128 + embed : + net : transformer + num_tokens : 1 + num_heads : 8 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' # token or obs_value + num_layers : 2 + num_heads : 4 + # sampling_rate : 0.2 + pred_head : + ens_size : 1 + num_layers : 1 \ No newline at end of file diff --git a/config/streams/fesom/fesom_elem.yml b/config/streams/fesom/fesom_elem.yml new file mode 100644 index 000000000..f9c07e847 --- /dev/null +++ b/config/streams/fesom/fesom_elem.yml @@ -0,0 +1,36 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +FESOM_ELEM : + type : fesom + filenames : ['ocean_elem'] + target_file: "/work/ab0995/a270088/Kacper/weathergenertor/AWICM3/ocean_node" + loss_weight : 1. + source : null + target : null + masking_rate : 0.6 + masking_rate_none : 0.05 + token_size : 128 + embed : + net : transformer + num_tokens : 1 + num_heads : 8 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' # token or obs_value + num_layers : 2 + num_heads : 4 + # sampling_rate : 0.2 + pred_head : + ens_size : 1 + num_layers : 1 \ No newline at end of file diff --git a/config/streams/fesom/ifs.yml b/config/streams/fesom/ifs.yml new file mode 100644 index 000000000..c0625152d --- /dev/null +++ b/config/streams/fesom/ifs.yml @@ -0,0 +1,36 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +IFS_ATMO : + type : fesom + filenames : ['atmos_all'] + loss_weight : 1. + source : null + target : null + target_exclude: ['msl', 'tsr', 'tsrc', 'z'] + masking_rate : 0.6 + masking_rate_none : 0.05 + token_size : 128 + embed : + net : transformer + num_tokens : 1 + num_heads : 8 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' # token or obs_value + num_layers : 2 + num_heads : 4 + # sampling_rate : 0.2 + pred_head : + ens_size : 1 + num_layers : 1 \ No newline at end of file diff --git a/config/streams/icon/icon.yml b/config/streams/icon/icon.yml new file mode 100644 index 000000000..a38bbdc97 --- /dev/null +++ b/config/streams/icon/icon.yml @@ -0,0 +1,36 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
+ +ICON : + type : icon + filenames : ['icon-art-NWP_OH_CHEMISTRY-chem_DOM01_ML_daily_repeat_reduced_levels.zarr'] + source : ['u_00', 'v_00', 'w_80', 'temp_00'] + target : ['u_00', 'v_00', 'w_80', 'temp_00'] + loss_weight : 1. + diagnostic : False + masking_rate : 0.6 + masking_rate_none : 0.05 + token_size : 32 + embed : + net : transformer + num_tokens : 1 + num_heads : 8 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' # token or obs_value + num_layers : 2 + num_heads : 4 + # sampling_rate : 0.2 + pred_head : + ens_size : 1 + num_layers : 1 \ No newline at end of file diff --git a/config/streams/igra/igra.yml b/config/streams/igra/igra.yml new file mode 100644 index 000000000..a7016e71c --- /dev/null +++ b/config/streams/igra/igra.yml @@ -0,0 +1,34 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +IGRA : + type : obs + filenames : ['igra.zarr'] + base_datetime : '1750-01-01T00:00:00' + loss_weight : 1.0 + #masking_rate_none : 0.05 + token_size : 64 + tokenize_spacetime : True + max_num_targets: -1 + embed : + net : transformer + num_tokens : 1 + num_heads : 2 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' # token or obs_value + num_layers : 2 + num_heads : 4 + pred_head : + ens_size : 1 + num_layers : 1 diff --git a/integration_tests/small1.yaml b/integration_tests/small1.yaml new file mode 100644 index 000000000..cfa294164 --- /dev/null +++ b/integration_tests/small1.yaml @@ -0,0 +1,19 @@ +streams_directory: "./integration_tests/streams/" +run_path: "./results" +model_path: "./models" +loss_fcts: [["mse", 1.0]] +loss_fcts_val: [["mse", 1.0]] +num_mini_epochs: 1 +samples_per_mini_epoch: 100 +samples_per_validation: 5 +lr_steps: 4 +lr_steps_warmup: 2 +lr_steps_cooldown: 2 +loader_num_workers: 8 + +forecast_offset : 0 +# len_hrs: 6 +# step_hrs: 6 + +train_log: + log_interval: 1 diff --git a/integration_tests/small1_test.py b/integration_tests/small1_test.py new file mode 100644 index 000000000..6a7d398ef --- /dev/null +++ b/integration_tests/small1_test.py @@ -0,0 +1,200 @@ +""" +Small test for the Weather Generator. +This test must run on a GPU machine. +It performs a training and inference of the Weather Generator model. 
+
+Command:
+uv run pytest ./integration_tests/small1_test.py
+"""
+
+import json
+import logging
+import os
+import shutil
+from pathlib import Path
+
+import omegaconf
+import pytest
+
+from weathergen.evaluate.run_evaluation import evaluate_from_config
+from weathergen.run_train import inference_from_args, train_with_args
+from weathergen.utils.metrics import get_train_metrics_path
+
+logger = logging.getLogger(__name__)
+
+# Read the current commit hash from git and take the first 5 characters:
+try:
+    from git import Repo
+
+    repo = Repo(search_parent_directories=False)
+    commit_hash = repo.head.object.hexsha[:5]
+    logger.info(f"Current commit hash: {commit_hash}")
+except Exception as e:
+    commit_hash = "unknown"
+    logger.warning(f"Could not get commit hash: {e}")
+
+WEATHERGEN_HOME = Path(__file__).parent.parent
+
+
+@pytest.fixture()
+def setup(test_run_id):
+    logger.info(f"setup fixture with {test_run_id}")
+    shutil.rmtree(WEATHERGEN_HOME / "results" / test_run_id, ignore_errors=True)
+    shutil.rmtree(WEATHERGEN_HOME / "models" / test_run_id, ignore_errors=True)
+    yield
+    logger.info("end fixture")
+
+
+@pytest.mark.parametrize("test_run_id", ["test_small1_" + commit_hash])
+def test_train(setup, test_run_id):
+    logger.info(f"test_train with run_id {test_run_id} {WEATHERGEN_HOME}")
+
+    train_with_args(
+        f"--config={WEATHERGEN_HOME}/integration_tests/small1.yaml".split()
+        + [
+            "--run_id",
+            test_run_id,
+        ],
+        f"{WEATHERGEN_HOME}/config/streams/streams_test/",
+    )
+
+    infer_with_missing(test_run_id)
+    evaluate_results(test_run_id)
+    assert_missing_metrics_file(test_run_id)
+    assert_train_loss_below_threshold(test_run_id)
+    assert_val_loss_below_threshold(test_run_id)
+    logger.info("end test_train")
+
+
+def infer(run_id):
+    logger.info("run inference")
+    inference_from_args(
+        ["-start", "2022-10-10", "-end", "2022-10-11", "--samples", "10", "--mini_epoch", "0"]
+        + [
+            "--from_run_id",
+            run_id,
+            "--run_id",
+            run_id,
+            "--config",
+            f"{WEATHERGEN_HOME}/integration_tests/small1.yaml",
+        ]
+    )
+
+
+def infer_with_missing(run_id):
+    logger.info("run inference with missing data")
+    inference_from_args(
+        ["-start", "2022-10-10", "-end", "2022-10-11", "--samples", "10", "--mini_epoch", "0"]
+        + [
+            "--from_run_id",
+            run_id,
+            "--run_id",
+            run_id,
+            "--config",
+            f"{WEATHERGEN_HOME}/integration_tests/small1.yaml",
+        ]
+    )
+
+
+def evaluate_results(run_id):
+    logger.info("run evaluation")
+    cfg = omegaconf.OmegaConf.create(
+        {
+            "global_plotting_options": {
+                "image_format": "png",
+                "dpi_val": 300,
+            },
+            "evaluation": {
+                "metrics": ["rmse", "l1", "mse"],
+                "verbose": True,
+                "summary_plots": True,
+                "summary_dir": "./plots/",
+                "print_summary": True,
+            },
+            "run_ids": {
+                run_id: {  # would be nice if this could be done with option
+                    "streams": {
+                        "ERA5": {
+                            "results_base_dir": "./results/",
+                            "channels": ["t_850"],  # "all" indicator would be nice
+                            "evaluation": {"forecast_steps": "all", "sample": "all"},
+                            "plotting": {
+                                "sample": [0, 1],
+                                "forecast_step": [0],
+                                "plot_maps": True,
+                                "plot_histograms": True,
+                                "plot_animations": True,
+                            },
+                        }
+                    },
+                    "label": "MTM ERA5",
+                    "mini_epoch": 0,
+                    "rank": 0,
+                }
+            },
+        }
+    )
+    # Not passing the mlflow client for tests.
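+    # Illustrative structure consumed by evaluate_from_config: run_ids.<run_id>.streams
+    # holds per-stream results_base_dir, channels, evaluation and plotting options,
+    # while label, mini_epoch and rank identify the checkpoint being evaluated.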
+    evaluate_from_config(cfg, None)
+
+
+def load_metrics(run_id):
+    """Helper function to load metrics."""
+    file_path = get_train_metrics_path(base_path=WEATHERGEN_HOME / "results", run_id=run_id)
+    if not os.path.exists(file_path):
+        raise FileNotFoundError(f"Metrics file not found for run_id: {run_id}")
+    with open(file_path) as f:
+        json_str = f.readlines()
+    return json.loads("[" + r"".join([s.replace("\n", ",") for s in json_str])[:-1] + "]")
+
+
+def assert_missing_metrics_file(run_id):
+    """Check that the metrics file exists and its contents can be loaded."""
+    file_path = get_train_metrics_path(base_path=WEATHERGEN_HOME / "results", run_id=run_id)
+    assert os.path.exists(file_path), f"Metrics file does not exist for run_id: {run_id}"
+    metrics = load_metrics(run_id)
+    logger.info(f"Loaded metrics for run_id: {run_id}: {metrics}")
+    assert metrics is not None, f"Failed to load metrics for run_id: {run_id}"
+
+
+def assert_train_loss_below_threshold(run_id):
+    """Test that the training 'stream.ERA5.loss_mse.loss_avg' metric is below a threshold."""
+    metrics = load_metrics(run_id)
+    loss_metric = next(
+        (
+            metric.get("stream.ERA5.loss_mse.loss_avg", None)
+            for metric in reversed(metrics)
+            if metric.get("stage") == "train"
+        ),
+        None,
+    )
+    assert loss_metric is not None, (
+        "'stream.ERA5.loss_mse.loss_avg' metric is missing in metrics file"
+    )
+    # Check that the loss does not explode in a single mini_epoch.
+    # This is meant to be a quick test, not a convergence test.
+    target = 0.25
+    assert loss_metric < target, (
+        f"'stream.ERA5.loss_mse.loss_avg' is {loss_metric}, expected to be below {target}"
+    )
+
+
+def assert_val_loss_below_threshold(run_id):
+    """Test that the validation 'stream.ERA5.loss_mse.loss_avg' metric is below a threshold."""
+    metrics = load_metrics(run_id)
+    loss_metric = next(
+        (
+            metric.get("stream.ERA5.loss_mse.loss_avg", None)
+            for metric in reversed(metrics)
+            if metric.get("stage") == "val"
+        ),
+        None,
+    )
+    assert loss_metric is not None, (
+        "'stream.ERA5.loss_mse.loss_avg' metric is missing in metrics file"
+    )
+    # Check that the loss does not explode in a single mini_epoch.
+    # This is meant to be a quick test, not a convergence test.
+    target = 1.25
+    assert loss_metric < target, (
+        f"'stream.ERA5.loss_mse.loss_avg' is {loss_metric}, expected to be below {target}"
+    )
diff --git a/integration_tests/streams/era5_small.yml b/integration_tests/streams/era5_small.yml
new file mode 100644
index 000000000..2a06eb7df
--- /dev/null
+++ b/integration_tests/streams/era5_small.yml
@@ -0,0 +1,39 @@
+# (C) Copyright 2024 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+ERA5 :
+  type : anemoi
+  filenames : ['aifs-ea-an-oper-0001-mars-o96-1979-2023-6h-v8.zarr']
+  loss_weight : 1.
+ source_exclude : ['w_', 'skt', 'sp', 'tcw', 'cp', 'tp'] + target_exclude : ['w_', 'skt', 'sp', 'tcw', 'cp', 'tp'] + source : ["t_850", "z_850"] + target : ["t_850"] + diagnostic : False + masking_rate : 0.6 + masking_rate_none : 0.05 + token_size : 32 + tokenize_spacetime : True + max_num_targets: -1 + embed : + net : transformer + num_tokens : 1 + num_heads : 2 + dim_embed : 16 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 16 + target_readout : + type : 'obs_value' + num_layers : 2 + num_heads : 2 + pred_head : + ens_size : 1 + num_layers : 1 \ No newline at end of file diff --git a/packages/common/pyproject.toml b/packages/common/pyproject.toml new file mode 100644 index 000000000..f2c48e6c9 --- /dev/null +++ b/packages/common/pyproject.toml @@ -0,0 +1,100 @@ +[project] +name = "weathergen-common" +version = "0.1.0" +description = "The WeatherGenerator Machine Learning Earth System Model" +readme = "../../README.md" +requires-python = ">=3.12,<3.13" +dependencies = [ + "xarray>=2025.6.1", + "dask>=2024.9.1", + "zarr==2.18.4, <3", + "numcodecs<0.16.0", + "astropy_healpix~=1.1.2", + "omegaconf~=2.3.0", + "pyyaml", +] + +[dependency-groups] +dev = [ + "pytest~=8.3.5", + "pytest-mock>=3.14.1", + "ruff==0.9.7", + "pyrefly==0.36.0", +] + + +[tool.pyrefly] +project-includes = ["src/"] +project-excludes = [ +] + +[tool.pyrefly.errors] +bad-argument-type = false +unsupported-operation = false +missing-attribute = false +no-matching-overload = false +bad-context-manager = false + + + + +# The linting configuration +[tool.ruff] + +# Wide rows +line-length = 100 + +[tool.ruff.lint] +# All disabled until the code is formatted. +select = [ + # pycodestyle + "E", + # Pyflakes + "F", + # pyupgrade + "UP", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + # isort + "I", + # Banned imports + "TID", + # Naming conventions + "N", + # print + "T201" +] + +# These rules are sensible and should be enabled at a later stage. +ignore = [ + # "B006", + "B011", + "UP008", + "SIM117", + "SIM118", + "SIM102", + "SIM401", + # To ignore, not relevant for us + "SIM108", # in case additional norm layer supports are added in future + "N817", # we use heavy acronyms, e.g., allowing 'import LongModuleName as LMN' (LMN is accepted) + "E731", # overly restrictive and less readable code + "N812", # prevents us following the convention for importing torch.nn.functional as F +] + +[tool.ruff.lint.flake8-tidy-imports.banned-api] +"numpy.ndarray".msg = "Do not use 'ndarray' to describe a numpy array type, it is a function. Use numpy.typing.NDArray or numpy.typing.NDArray[np.float32] for example" + +[tool.ruff.format] +# Use Unix `\n` line endings for all files +line-ending = "lf" + + + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/weathergen"] diff --git a/packages/common/src/weathergen/common/__init__.py b/packages/common/src/weathergen/common/__init__.py new file mode 100644 index 000000000..f2075fa5b --- /dev/null +++ b/packages/common/src/weathergen/common/__init__.py @@ -0,0 +1,10 @@ +# TODO: move here: +# - better_abc +# - run_id +# - config +# - distributed +# - logger + + +def common_function(): + return "This is a common function for weather generation." 
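As a usage note on the TID251 banned-import rule in packages/common/pyproject.toml
above, here is a minimal sketch of a compliant annotation (the function name and body
are illustrative only, not part of the patch):

    import numpy as np
    from numpy.typing import NDArray

    def normalize(x: NDArray[np.float32]) -> NDArray[np.float32]:
        # Annotate with numpy.typing.NDArray rather than the banned 'numpy.ndarray'.
        return (x - x.mean()) / x.std()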
diff --git a/packages/common/src/weathergen/common/config.py b/packages/common/src/weathergen/common/config.py new file mode 100644 index 000000000..c32732ba7 --- /dev/null +++ b/packages/common/src/weathergen/common/config.py @@ -0,0 +1,575 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +import io +import json +import logging +import os +import random +import string +import subprocess +from pathlib import Path + +import yaml +import yaml.constructor +import yaml.scanner +from omegaconf import DictConfig, ListConfig, OmegaConf +from omegaconf.omegaconf import open_dict + +_REPO_ROOT = Path( + __file__ +).parent.parent.parent.parent.parent.parent # TODO use importlib for resources +_DEFAULT_CONFIG_PTH = _REPO_ROOT / "config" / "default_config.yml" + +_logger = logging.getLogger(__name__) + + +Config = DictConfig + + +def get_run_id(): + s1 = string.ascii_lowercase + s2 = string.ascii_lowercase + string.digits + return "".join(random.sample(s1, 1)) + "".join(random.sample(s2, 7)) + + +def format_cf(config: Config) -> str: + stream = io.StringIO() + for key, value in config.items(): + match key: + case "streams": + for rt in value: + for k, v in rt.items(): + whitespace = "" if k == "reportypes" else " " + stream.write(f"{whitespace}{k} : {v}") + case _: + stream.write(f"{key} : {value}\n") + + return stream.getvalue() + + +def save(config: Config, mini_epoch: int | None): + """Save current config into the current runs model directory.""" + path_models = Path(config.model_path) + # save in directory with model files + dirname = path_models / config.run_id + dirname.mkdir(exist_ok=True, parents=True) + + fname = _get_model_config_file_write_name(path_models, config.run_id, mini_epoch) + + json_str = json.dumps(OmegaConf.to_container(config)) + with fname.open("w") as f: + f.write(json_str) + + +def load_model_config(run_id: str, mini_epoch: int | None, model_path: str | None) -> Config: + """ + Load a configuration file from a given run_id and mini_epoch. + If run_id is a full path, loads it from the full path. + """ + if Path(run_id).exists(): # load from the full path if a full path is provided + fname = Path(run_id) + _logger.info(f"Loading config from provided full run_id path: {fname}") + else: + # Load model config here. In case model_path is not provided, get it from private conf + if model_path is None: + pconf = _load_private_conf() + model_path = _get_config_attribute( + config=pconf, attribute_name="model_path", fallback="models" + ) + path = Path(model_path) + fname = _get_model_config_file_read_name(path, run_id, mini_epoch) + assert fname.exists(), ( + "The fallback path to the model does not exist. 
Please provide a `model_path`.",
+            fname,
+        )
+
+    _logger.info(f"Loading config from specified run_id and mini_epoch: {fname}")
+
+    with fname.open() as f:
+        json_str = f.read()
+
+    config = OmegaConf.create(json.loads(json_str))
+
+    return _apply_fixes(config)
+
+
+def _get_model_config_file_write_name(path: Path, run_id: str, mini_epoch: int | None):
+    if mini_epoch is None:
+        mini_epoch_str = ""
+    elif mini_epoch == -1:
+        mini_epoch_str = "_latest"
+    else:
+        mini_epoch_str = f"_chkpt{mini_epoch:05d}"
+
+    return path / run_id / f"model_{run_id}{mini_epoch_str}.json"
+
+
+def _get_model_config_file_read_name(path: Path, run_id: str, mini_epoch: int | None):
+    if mini_epoch is None:
+        mini_epoch_str = ""
+    elif mini_epoch == -1:
+        mini_epoch_str = "_latest"
+    elif (path / run_id / f"model_{run_id}_epoch{mini_epoch:05d}.json").exists():
+        mini_epoch_str = f"_epoch{mini_epoch:05d}"
+    else:
+        mini_epoch_str = f"_chkpt{mini_epoch:05d}"
+
+    return path / run_id / f"model_{run_id}{mini_epoch_str}.json"
+
+
+def get_model_results(run_id: str, mini_epoch: int, rank: int) -> Path:
+    """
+    Get the path to the model results zarr store from a given run_id and mini_epoch.
+    """
+    run_results = Path(_load_private_conf(None)["path_shared_working_dir"]) / f"results/{run_id}"
+
+    zarr_path_new = run_results / f"validation_chkpt{mini_epoch:05d}_rank{rank:04d}.zarr"
+    zarr_path_old = run_results / f"validation_epoch{mini_epoch:05d}_rank{rank:04d}.zarr"
+
+    if zarr_path_new.exists() or zarr_path_new.is_dir():
+        zarr_path = zarr_path_new
+    elif zarr_path_old.exists() or zarr_path_old.is_dir():
+        zarr_path = zarr_path_old
+    else:
+        raise FileNotFoundError(
+            f"Zarr file with run_id {run_id}, mini_epoch {mini_epoch} and rank {rank} does not "
+            f"exist or is not a directory."
+        )
+
+    return zarr_path
+
+
+def _apply_fixes(config: Config) -> Config:
+    """
+    Apply fixes to maintain a best-effort backward compatibility.
+
+    This method should act as a central hook to implement config backward
+    compatibility fixes. This is needed to run inference from, or continue
+    training from, "outdated" run configurations. The fixes in this function
+    should eventually be removed.
+    """
+    config = _check_logging(config)
+    return config
+
+
+def _check_logging(config: Config) -> Config:
+    """
+    Apply fixes to log frequency config.
+    """
+    config = config.copy()
+    if config.get("train_log_freq") is None:  # TODO remove this for next version
+        config.train_log_freq = OmegaConf.create(
+            {"checkpoint": 250, "terminal": 10, "metrics": config.train_log.log_interval}
+        )
+
+    return config
+
+
+def load_config(
+    private_home: Path | None,
+    from_run_id: str | None,
+    mini_epoch: int | None,
+    *overwrites: Path | dict | Config,
+) -> Config:
+    """
+    Merge config information from multiple sources into one run_config. Anything in the
+    private config's "secrets" section will be discarded.
+
+    Args:
+        private_home: Configuration file containing platform-dependent information and secrets
+        from_run_id: Run id of the pretrained WeatherGenerator model
+            to continue training or inference
+        mini_epoch: mini_epoch of the checkpoint to load. -1 indicates last checkpoint available.
+        *overwrites: Additional overwrites from different sources
+
+    Note: The order of precedence for merging the final config is in ascending order:
+        - base config (either default config or loaded from previous run)
+        - private config
+        - overwrites (also in ascending order)
+    """
+    private_config = _load_private_conf(private_home)
+    overwrite_configs: list[Config] = []
+    for overwrite in overwrites:
+        if isinstance(overwrite, (str | Path)):
+            # Because of the way we pass extra configs through slurm,
+            # all the paths may be concatenated with ":"
+            p = str(overwrite).split(":")
+            for path in p:
+                c = _load_overwrite_conf(Path(path))
+                c = _load_streams_in_config(c)
+                overwrite_configs.append(c)
+        else:
+            # If it is a dict or DictConfig, we can directly use it
+            c = _load_overwrite_conf(overwrite)
+            c = _load_streams_in_config(c)
+            overwrite_configs.append(c)
+
+    private_config = set_paths(private_config)
+
+    if from_run_id is None:
+        base_config = _load_default_conf()
+    else:
+        base_config = load_model_config(
+            from_run_id, mini_epoch, private_config.get("model_path", None)
+        )
+        from_run_id = base_config.run_id
+    with open_dict(base_config):
+        base_config.from_run_id = from_run_id
+    # use OmegaConf.unsafe_merge if too slow
+    c = OmegaConf.merge(base_config, private_config, *overwrite_configs)
+    assert isinstance(c, Config)
+
+    # Ensure the config has mini-epoch notation
+    if hasattr(c, "samples_per_epoch"):
+        c.samples_per_mini_epoch = c.samples_per_epoch
+        c.num_mini_epochs = c.num_epochs
+
+    return c
+
+
+def _load_streams_in_config(config: Config) -> Config:
+    """If the config contains a streams_directory, loads the streams and returns the config with
+    the streams set."""
+    streams_directory = config.get("streams_directory", None)
+    config = config.copy()
+    if streams_directory is not None:
+        streams_directory = Path(streams_directory)
+        config.streams = load_streams(streams_directory)
+    return config
+
+
+def set_run_id(config: Config, run_id: str | None, reuse_run_id: bool) -> Config:
+    """
+    Determine and set run_id of current run.
+
+    Determining the run_id follows this logic:
+
+    1. (default case): run train, train_continue or inference without any flags
+       => generate a new run_id for this run.
+    2. (assign run_id): run train, train_continue or inference with --run_id flag
+       => assign a run_id manually to this run.
+       This is intended for outside tooling and should not be used manually.
+    3. (reuse run_id -> only for train_continue and inference):
+       reuse the run_id from the run specified by --from_run_id.
+       Since the correct run_id is already loaded in the config, nothing has to be assigned.
+       This case will happen if --reuse_run_id is specified.
+
+
+    Args:
+        config: Base configuration loaded from previous run or default.
+        run_id: Id assigned to this run. If None a new one will be generated.
+        reuse_run_id: Reuse run_id from base configuration instead.
+
+    Returns:
+        config object with the run_id attribute properly set.
+    """
+    config = config.copy()
+    if reuse_run_id:
+        assert config.run_id is not None, "run_id loaded from previous run should not be None."
+        _logger.info(f"reusing run_id from previous run: {config.run_id}")
+    else:
+        if run_id is None:
+            # generate new id if run_id is None
+            config.run_id = get_run_id()
+            _logger.info(f"using generated run_id: {config.run_id}")
+        else:
+            config.run_id = run_id
+            _logger.info(
+                f"using assigned run_id: {config.run_id}."
+                f" If you manually selected this run_id, this is an error."
+ ) + + return config + + +def from_cli_arglist(arg_list: list[str]) -> Config: + """ + Parse a Config instance from cli arguments. + + This enables convenient collecting of arguments into an overwrite. + + Args: + arg_list: items in this list should be of the form: parent_obj.nested_obj=value + """ + return OmegaConf.from_cli(arg_list) + + +def _load_overwrite_conf(overwrite: Path | dict | DictConfig) -> DictConfig: + """ + Convert different sources into configs that can be used as overwrites. + + raises: ValueError if argument cannot be turned into DictConfig. + """ + + match overwrite: # match the type + case Path(): + _logger.info(f"Loading overwrite config from file: {overwrite}.") + overwrite_config = OmegaConf.load(overwrite) + case dict(): + _logger.info(f"Loading overwrite config from dict: {overwrite}.") + overwrite_config = OmegaConf.create(overwrite) + case DictConfig(): + _logger.info(f"Using existing config as overwrite: {overwrite}.") + overwrite_config = overwrite + case _: + msg = f"Cannot build config from overwrite: {overwrite}, with type {type(overwrite)}" + raise ValueError(msg) + + assert isinstance(overwrite_config, DictConfig) + return overwrite_config + + +def _load_private_conf(private_home: Path | None = None) -> DictConfig: + "Return the private configuration." + "If none, take it from the environment variable WEATHERGEN_PRIVATE_CONF." + + env_script_path = _REPO_ROOT.parent / "WeatherGenerator-private" / "hpc" / "platform-env.py" + + if private_home is not None and private_home.is_file(): + _logger.info(f"Loading private config from {private_home}.") + + elif "WEATHERGEN_PRIVATE_CONF" in os.environ: + private_home = Path(os.environ["WEATHERGEN_PRIVATE_CONF"]) + _logger.info(f"Loading private config from WEATHERGEN_PRIVATE_CONF:{private_home}.") + + elif env_script_path.is_file(): + _logger.info(f"Loading private config from platform-env.py: {env_script_path}.") + # This code does many checks to ensure that any error message is surfaced. + # Since it is a process call, it can be hard to diagnose the error. + # TODO: eventually, put all this wrapper code in a separate function + try: + result_hpc = subprocess.run( + [str(env_script_path), "hpc"], capture_output=True, text=True, check=True + ) + except subprocess.CalledProcessError as e: + _logger.error( + ( + "Error while running platform-env.py:", + f" {e} {e.stderr} {e.stdout} {e.output} {e.returncode}", + ) + ) + raise + if result_hpc.returncode != 0: + _logger.error(f"Error while running platform-env.py: {result_hpc.stderr.strip()}") + raise RuntimeError(f"Error while running platform-env.py: {result_hpc.stderr.strip()}") + _logger.info(f"Detected HPC: {result_hpc.stdout.strip()}.") + + result = subprocess.run( + [str(env_script_path), "hpc-config"], capture_output=True, text=True, check=True + ) + private_home = Path(result.stdout.strip()) + _logger.info(f"Loading private config from platform-env.py output: {private_home}.") + else: + _logger.info(f"Could not find platform script at {env_script_path}") + raise FileNotFoundError( + "Could not find private config. Please set the environment variable " + "WEATHERGEN_PRIVATE_CONF or provide a path." 
+ ) + private_cf = OmegaConf.load(private_home) + + if "secrets" in private_cf: + del private_cf["secrets"] + + assert isinstance(private_cf, DictConfig) + return private_cf + + +def _load_default_conf() -> Config: + """Deserialize default configuration.""" + c = OmegaConf.load(_DEFAULT_CONFIG_PTH) + assert isinstance(c, Config) + return c + + +def load_streams(streams_directory: Path) -> list[Config]: + # TODO: might want to put this into config later instead of hardcoding it here... + streams_history = { + "streams_anemoi": "era5_1deg", + "streams_mixed": "era5_nppatms_synop", + "streams_ocean": "fesom", + "streams_icon": "icon", + "streams_mixed_experimental": "cerra_seviri", + } + if not streams_directory.is_dir(): + streams_directory_config = streams_directory + dirs = [streams_directory] + while streams_directory.name in streams_history and not streams_directory.is_dir(): + streams_directory = streams_directory.with_name(streams_history[streams_directory.name]) + dirs.append(streams_directory) + if not streams_directory.is_dir(): + msg = f"Could not find stream directory, nor its history: {[str(dir) for dir in dirs]}" + raise FileNotFoundError(msg) + _logger.info( + f"Streams directory {streams_directory} found in " + f"history for {streams_directory_config}. " + "Note: This change will not be reflected in the config. " + "Please update the 'streams_directory' variable manually." + ) + + # read all reportypes from directory, append to existing ones + streams_directory = streams_directory.absolute() + _logger.info(f"Reading streams from {streams_directory}") + + # append streams to existing (only relevant for evaluation) + streams = {} + # exclude temp files starting with "." or "#" (eg. emacs, vim, macos savefiles) + stream_files = sorted(streams_directory.rglob("[!.#]*.yml")) + _logger.info(f"Discover stream configs: {', '.join(map(str, stream_files))}") + for config_file in stream_files: + try: + config = OmegaConf.load(config_file) + for stream_name, stream_config in config.items(): + # Stream config schema is {stream_name: stream_config} + # where stream_config itself is a dict containing the actual options. + # stream_name needs to be added to this dict since only stream_config + # will be further processed. + stream_config.name = stream_name + if stream_name in streams: + msg = f"Duplicate stream name found: {stream_name}." + "Please ensure all stream names are unique." + raise ValueError(msg) + else: + streams[stream_name] = stream_config + _logger.info(f"Loaded stream config: {stream_name} from file {config_file}") + + except (yaml.scanner.ScannerError, yaml.constructor.ConstructorError) as e: + msg = f"Invalid yaml file while parsing stream configs: {config_file}" + raise ValueError(msg) from e + except AttributeError as e: + msg = f"Invalid yaml file while parsing stream configs: {config_file}" + raise ValueError(msg) from e + except IndexError: + # support commenting out entire stream files to avoid loading them. 
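+                # e.g. a stream file whose whole top-level mapping is commented out
+                # yields an empty document; it is reported and skipped here.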
+ _logger.warning(f"Parsed stream configuration file is empty: {config_file}") + continue + + return list(streams.values()) + + +def set_paths(config: Config) -> Config: + """Set the configs run_path model_path attributes to default values if not present.""" + config = config.copy() + config.run_path = _get_config_attribute( + config=config, attribute_name="run_path", fallback="results" + ) + config.model_path = _get_config_attribute( + config=config, attribute_name="model_path", fallback="models" + ) + + return config + + +def _get_config_attribute(config: Config, attribute_name: str, fallback: str) -> str: + """Get an attribute from a Config. If not available, fall back to path_shared_working_dir + concatenated with the desired fallback path. Raise an error if neither the attribute nor a + fallback is specified.""" + attribute = OmegaConf.select(config, attribute_name) + fallback_root = OmegaConf.select(config, "path_shared_working_dir") + assert attribute is not None or fallback_root is not None, ( + f"Must specify `{attribute_name}` in config if `path_shared_working_dir` is None in config" + ) + attribute = attribute if attribute else fallback_root + fallback + return attribute + + +def get_path_run(config: Config) -> Path: + """Get the current runs run_path for storing run results and logs.""" + return Path(config.run_path) / config.run_id + + +def get_path_model(config: Config) -> Path: + """Get the current runs model_path for storing model checkpoints.""" + return Path(config.model_path) / config.run_id + + +def get_path_output(config: Config, mini_epoch: int) -> Path: + base_path = get_path_run(config) + fname = f"validation_chkpt{mini_epoch:05d}_rank{config.rank:04d}.zarr" + + return base_path / fname + + +def get_shared_wg_path(local_path: str | Path) -> Path: + """ + Resolves a local, relative path to an absolute path within the configured shared working + directory. + + This utility function retrieves the base path defined for the shared WeatherGenerator (WG) + working directory from the private configuration and appends the provided local path segment. + + Parameters + ---------- + local_path : str or Path + The local or relative path segment (e.g., 'results', 'models', 'output') that needs + to be located within the shared working directory structure. + + Returns + ------- + Path + The absolute pathlib.Path object pointing to the specified location + within the shared working directory. + + Notes + ----- + The shared working directory base is retrieved from the 'path_shared_working_dir' + key found in the private configuration loaded by `_load_private_conf()`. + """ + pcfg = _load_private_conf() + return Path(pcfg.get("path_shared_working_dir")) / local_path + + +def validate_forecast_policy_and_steps(cf: OmegaConf): + """ + Validates the forecast policy and steps within a configuration object. + + This method enforces specific rules for the `forecast_steps` attribute, which can be + either a single integer or a list of integers, ensuring consistency with the + `forecast_policy` attribute. + + The validation logic is as follows: + - If `cf.forecast_steps` is a single integer, a `forecast_policy` must be defined + (i.e., not None or empty) only if `forecast_steps` is unequal to 0. + - If `cf.forecast_steps` is a list, it must be non-empty, and all of its elements + must be non-negative integers. Additionally, a `forecast_policy` must be + defined if any of the forecast steps in the list are greater than 0. 
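+
+    For illustration (policy names as in the error text below): 'forecast_steps: 4'
+    requires a policy such as 'fixed', 'forecast_steps: [2, 2, 4, 4]' requires e.g.
+    'sequential', while 'forecast_steps: 0' requires no policy.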
+
+    Args:
+        cf (OmegaConf): The configuration object containing the `forecast_steps`
+            and `forecast_policy` attributes.
+
+    Raises:
+        TypeError: If `cf.forecast_steps` is not an integer or a non-empty list.
+        AssertionError: If a `forecast_policy` is required but not provided, or
+            if `forecast_steps` is negative while `forecast_policy` is provided, or
+            if any of the forecast steps in a list are negative.
+    """
+    provide_forecast_policy = (
+        "A 'forecast_policy' must be specified when 'forecast_steps' is not zero. "
+    )
+    valid_forecast_policies = (
+        "Valid values for 'forecast_policy' are, e.g., 'fixed' when using constant "
+        "forecast steps throughout the training, or 'sequential' when varying the forecast "
+        "steps over mini_epochs, such as, e.g., 'forecast_steps: [2, 2, 4, 4]'. "
+    )
+    valid_forecast_steps = (
+        "'forecast_steps' must be a non-negative integer or a non-empty list of "
+        "non-negative integers. "
+    )
+    if isinstance(cf.forecast_steps, int):
+        assert cf.forecast_policy and cf.forecast_steps > 0 if cf.forecast_steps != 0 else True, (
+            provide_forecast_policy + valid_forecast_policies + valid_forecast_steps
+        )
+    elif isinstance(cf.forecast_steps, ListConfig) and len(cf.forecast_steps) > 0:
+        assert (
+            cf.forecast_policy and all(step >= 0 for step in cf.forecast_steps)
+            if any(n > 0 for n in cf.forecast_steps)
+            else True
+        ), provide_forecast_policy + valid_forecast_policies + valid_forecast_steps
+    else:
+        raise TypeError(valid_forecast_steps)
diff --git a/packages/common/src/weathergen/common/io.py b/packages/common/src/weathergen/common/io.py
new file mode 100644
index 000000000..a95419b1c
--- /dev/null
+++ b/packages/common/src/weathergen/common/io.py
@@ -0,0 +1,666 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+import dataclasses
+import functools
+import itertools
+import logging
+import pathlib
+import typing
+
+import dask.array as da
+import numpy as np
+import xarray as xr
+import zarr
+from numpy import datetime64
+from numpy.typing import NDArray
+
+# experimental value, should be inferred more intelligently
+CHUNK_N_SAMPLES = 16392
+type DType = np.float32
+type NPDT64 = datetime64
+type ArrayType = zarr.Array | NDArray[DType]
+
+
+_logger = logging.getLogger(__name__)
+
+
+def is_ndarray(obj: typing.Any) -> bool:
+    """Check if object is an ndarray (wraps the linter warning)."""
+    return isinstance(obj, np.ndarray)  # noqa: TID251
+
+
+class TimeRange:
+    """
+    Holds information about a time interval used in forecasting.
+
+    The time interval is left-closed, right-open. TimeRange can be instantiated from
+    numpy datetime64 objects or from strings as output by TimeRange.as_dict.
+    Both will be converted to datetime64 with nanosecond precision.
+
+    Attrs:
+        start: Start of the time range (datetime64, nanosecond precision).
+        end: End of the time range (datetime64, nanosecond precision).
+    """
+
+    def __init__(self, start: NPDT64 | str, end: NPDT64 | str):
+        # ensure consistent type => convert serialized strings
+        self.start = np.datetime64(start, "ns")
+        self.end = np.datetime64(end, "ns")
+
+        assert self.start < self.end
+
+    def as_dict(self) -> dict[str, str]:
+        """
+        Convert instance to a JSON-serializable dict.
+
+        Datetime objects are converted to strings of the form "YYYY-MM-DDThh:mm:ss.sssssssss".
+
+        Returns:
+            JSON-serializable dict, where datetime objects were converted to strings.
+        """
+        return {
+            "start": str(self.start),
+            "end": str(self.end),
+        }
+
+    def forecast_interval(self, forecast_dt_hours: int, fstep: int) -> "TimeRange":
+        """
+        Infer the interval considered at forecast step `fstep`.
+
+        Args:
+            forecast_dt_hours: number of hours the source TimeRange is shifted per forecast step.
+            fstep: current forecast step.
+
+        Returns:
+            The TimeRange shifted by `fstep` forecast steps.
+        """
+        assert forecast_dt_hours > 0 and fstep >= 0
+        offset = np.timedelta64(forecast_dt_hours * fstep, "h")
+        return TimeRange(self.start + offset, self.end + offset)
+
+
+@dataclasses.dataclass
+class IOReaderData:
+    """
+    Equivalent to data_reader_base.ReaderData.
+
+    This class needs to exist since otherwise the common package would
+    have a dependency on the core model. Ultimately a unified data model
+    should be implemented in the common package.
+    """
+
+    coords: NDArray[DType]
+    geoinfos: NDArray[DType]
+    data: NDArray[DType]
+    datetimes: NDArray[NPDT64]
+
+    def is_empty(self):
+        """
+        Test if data object is empty.
+        """
+        return len(self.data) == 0
+
+    @classmethod
+    def create(cls, other: typing.Any) -> "IOReaderData":
+        """
+        Create an instance from a data_reader_base.ReaderData instance.
+
+        other should be such an instance.
+        """
+        coords = np.asarray(other.coords)
+        geoinfos = np.asarray(other.geoinfos)
+        data = np.asarray(other.data)
+        datetimes = np.asarray(other.datetimes)
+
+        n_datapoints = len(data)
+
+        assert coords.shape == (n_datapoints, 2), "number of datapoints do not match data"
+        assert geoinfos.shape[0] == n_datapoints, "number of datapoints do not match data"
+        assert datetimes.shape[0] == n_datapoints, "number of datapoints do not match data"
+
+        return cls(**dataclasses.asdict(other))
+
+    @classmethod
+    def combine(cls, others: list["IOReaderData"]) -> "IOReaderData":
+        """
+        Create an instance from data_reader_base.ReaderData instances by combining multiple ones.
+
+        others is a list of ReaderData instances.
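+
+        A usage sketch (names illustrative):
+            merged = IOReaderData.combine([rd_window_a, rd_window_b])
+        concatenates coords, geoinfos, data and datetimes along the datapoints axis.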
+ """ + assert len(others) > 0, len(others) + + other = others[0] + coords = np.zeros((0, other.coords.shape[1]), dtype=other.coords.dtype) + geoinfos = np.zeros((0, other.geoinfos.shape[1]), dtype=other.geoinfos.dtype) + data = np.zeros((0, other.data.shape[1]), dtype=other.data.dtype) + datetimes = np.array([], dtype=other.datetimes.dtype) + + for other in others: + n_datapoints = len(other.data) + assert other.coords.shape == (n_datapoints, 2), "number of datapoints do not match" + assert other.geoinfos.shape[0] == n_datapoints, "number of datapoints do not match" + assert other.datetimes.shape[0] == n_datapoints, "number of datapoints do not match" + + coords = np.concatenate([coords, other.coords]) + geoinfos = np.concatenate([geoinfos, other.geoinfos]) + data = np.concatenate([data, other.data]) + datetimes = np.concatenate([datetimes, other.datetimes]) + + return cls(coords, geoinfos, data, datetimes) + + +@dataclasses.dataclass +class ItemKey: + """Metadata to identify one output item.""" + + sample: int + forecast_step: int + stream: str + + @property + def path(self) -> str: + """Unique path within a hierarchy for one output item.""" + return f"{self.sample}/{self.stream}/{self.forecast_step}" + + @property + def with_source(self) -> bool: + """Decide if output item should contain source dataset.""" + return self.forecast_step == 0 + + def with_target(self, forecast_offset: typing.Literal[0, 1]) -> bool: + """Decide if output item should contain target and predictions.""" + assert forecast_offset in (0, 1) + return (not self.with_source) or (forecast_offset == 0) + + @staticmethod + def _infer_forecast_offset(datasets: dict[str, typing.Any]) -> int: + """ + Infer forecast offset by the (non)presence of targets at fstep 0. + + Args: + datasets: Datasets found in a fstep 0 OutputItem. + """ + # forecast offset=1 should produce no targets at fstep 0 + return 0 if "target" in datasets else 1 + + +@dataclasses.dataclass +class OutputDataset: + """Access source/target/prediction zarr data contained in one output item.""" + + name: str + item_key: ItemKey + source_interval: TimeRange + + # (datapoints, channels, ens) + data: ArrayType # wrong type => array like + + # (datapoints,) + times: ArrayType + + # (datapoints, 2) + coords: ArrayType + + # (datapoints, geoinfos) geoinfos are stream dependent => 0 for most gridded data + geoinfo: ArrayType + + channels: list[str] + geoinfo_channels: list[str] + + @classmethod + def create( + cls, name: str, key: ItemKey, arrays: dict[str, ArrayType], attrs: dict[str, typing.Any] + ): + """ + Create Output dataset from dictonaries. + + Args: + name: Name of dataset (target/prediction/source) + item_key: ItemKey to associated with the parent OutputItem. + arrays: Data and Coordinate arrays. + attrs: Additional metadata. 
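+
+            Note: attrs is expected to hold 'source_interval' (popped here) together
+            with 'channels' and 'geoinfo_channels', mirroring what _write_metadata stores.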
+ """ + assert "source_interval" in attrs, "missing expected attribute 'source_interval'" + + source_interval = TimeRange(**attrs.pop("source_interval")) + return cls(name, key, source_interval, **arrays, **attrs) + + @functools.cached_property + def arrays(self) -> dict[str, ArrayType]: + """Iterate over the arrays and their names.""" + return { + "data": self.data, + "times": self.times, + "coords": self.coords, + "geoinfo": self.geoinfo, + } + + @functools.cached_property + def datapoints(self) -> NDArray[np.int_]: + return np.arange(self.data.shape[0]) + + def as_xarray(self, chunk_nsamples=CHUNK_N_SAMPLES) -> xr.DataArray: + """Convert raw dask arrays into chunked dask-aware xarray dataset.""" + chunks = (chunk_nsamples, *self.data.shape[1:]) + + # maybe do dask conversion earlier? => usefull for parallel writing? + data = da.from_zarr(self.data, chunks=chunks) # dont call compute to lazy load + # include pseudo ens dim so all data arrays have same dimensionality + # TODO: does it make sense for target and source to have ens dim? + additional_dims = (0, 1, 2) if len(data.shape) == 3 else (0, 1, 2, 5) + expanded_data = da.expand_dims(data, axis=additional_dims) + coords = da.from_zarr(self.coords).compute() + times = da.from_zarr(self.times).compute().astype("datetime64[ns]") + geoinfo = da.from_zarr(self.geoinfo).compute() + geoinfo = {name: ("ipoint", geoinfo[:, i]) for i, name in enumerate(self.geoinfo_channels)} + # TODO: make sample, stream, forecast_step DataArray attribute, test how it + # interacts with concatenating + dims = ["sample", "stream", "forecast_step", "ipoint", "channel", "ens"] + ds_coords = { + "sample": [self.item_key.sample], + "source_interval_start": ("sample", [self.source_interval.start]), + "source_interval_end": ("sample", [self.source_interval.end]), + "stream": [self.item_key.stream], + "forecast_step": [self.item_key.forecast_step], + "ipoint": self.datapoints, + "channel": self.channels, # TODO: make sure channel names align with data + "valid_time": ("ipoint", times), + "lat": ("ipoint", coords[..., 0]), + "lon": ("ipoint", coords[..., 1]), + **geoinfo, + } + return xr.DataArray(expanded_data, dims=dims, coords=ds_coords, name=self.name) + + +class OutputItem: + def __init__( + self, + key: ItemKey, + forecast_offset=int | None, + target: OutputDataset | None = None, + prediction: OutputDataset | None = None, + source: OutputDataset | None = None, + ): + """Collection of possible datasets for one output item.""" + self.key = key + self.target = target + self.prediction = prediction + self.source = source + + self.datasets = [] + + if self.key.with_source: + self._append_dataset(self.source, "source") + + if self.key.with_target(forecast_offset): + self._append_dataset(self.target, "target") + self._append_dataset(self.prediction, "prediction") + + def _append_dataset(self, dataset: OutputDataset | None, name: str) -> None: + if dataset: + self.datasets.append(dataset) + else: + msg = f"Missing {name} dataset for item: {self.key.path}" + raise ValueError(msg) + + +class ZarrIO: + """Manage zarr storage hierarchy.""" + + def __init__(self, store_path: pathlib.Path): + self._store_path = store_path + self.data_root: zarr.Group | None = None + + def __enter__(self) -> typing.Self: + self._store = zarr.storage.DirectoryStore(self._store_path) + self.data_root = zarr.group(store=self._store) + + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self._store.close() + + def write_zarr(self, item: OutputItem): + """Write one output item to the 
zarr store.""" + group = self._get_group(item.key, create=True) + for dataset in item.datasets: + if dataset is not None: + self._write_dataset(group, dataset) + + def get_data(self, sample: int, stream: str, forecast_step: int) -> OutputItem: + """Get datasets for the output item matching the arguments.""" + key = ItemKey(sample, forecast_step, stream) + + return self.load_zarr(key) + + def load_zarr(self, key: ItemKey) -> OutputItem: + """Get datasets for a output item.""" + datasets = self._get_datasets(key) + + return OutputItem(key=key, forecast_offset=self.forecast_offset, **datasets) + + def _get_datasets(self, key: ItemKey): + group = self._get_group(key) + return { + name: OutputDataset.create( + name, key, dict(dataset.arrays()), dict(dataset.attrs).copy() + ) + for name, dataset in group.groups() + } + + def _get_group(self, item: ItemKey, create: bool = False) -> zarr.Group: + assert self.data_root is not None, "ZarrIO must be opened before accessing data." + group: zarr.Group | None + if create: + group = self.data_root.create_group(item.path) + else: + try: + group = self.data_root.get(item.path) + assert group is not None, f"Zarr group: {item.path} does not exist." + except KeyError as e: + msg = f"Zarr group: {item.path} has not been created." + raise FileNotFoundError(msg) from e + + assert group is not None, f"Zarr group: {item.path} does not exist." + return group + + def _write_dataset(self, item_group: zarr.Group, dataset: OutputDataset): + dataset_group = item_group.require_group(dataset.name) + self._write_metadata(dataset_group, dataset) + self._write_arrays(dataset_group, dataset) + + def _write_metadata(self, dataset_group: zarr.Group, dataset: OutputDataset): + dataset_group.attrs["channels"] = dataset.channels + dataset_group.attrs["geoinfo_channels"] = dataset.geoinfo_channels + dataset_group.attrs["source_interval"] = dataset.source_interval.as_dict() + + def _write_arrays(self, dataset_group: zarr.Group, dataset: OutputDataset): + for array_name, array in dataset.arrays.items(): # suffix is eg. data or coords + self._create_dataset(dataset_group, array_name, array) + + def _create_dataset(self, group: zarr.Group, name: str, array: NDArray): + assert is_ndarray(array), f"Expected ndarray but got: {type(array)}" + if array.size == 0: # sometimes for geoinfo + chunks = None + else: + chunks = (CHUNK_N_SAMPLES, *array.shape[1:]) + _logger.debug( + f"writing array: {name} with shape: {array.shape},chunks: {chunks}" + + "into group: {group}." + ) + group.create_dataset(name, data=array, chunks=chunks) + + @functools.cached_property + def forecast_offset(self) -> int: + fstep0_datasets = self._get_datasets(self.example_key) + return ItemKey._infer_forecast_offset(fstep0_datasets) + + @functools.cached_property + def example_key(self) -> ItemKey: + try: + sample, example_sample = next(self.data_root.groups()) + stream, example_stream = next(example_sample.groups()) + fstep = 0 + except StopIteration as e: + msg = f"Data store at: {self._store_path} is empty." 
+            raise FileNotFoundError(msg) from e
+
+        return ItemKey(sample, fstep, stream)
+
+    @functools.cached_property
+    def samples(self) -> list[int]:
+        """Query available samples in this zarr store."""
+        return list(self.data_root.group_keys())
+
+    @functools.cached_property
+    def streams(self) -> list[str]:
+        """Query available streams in this zarr store."""
+        # assume stream/samples are orthogonal => use first sample
+        _, example_sample = next(self.data_root.groups())
+        return list(example_sample.group_keys())
+
+    @functools.cached_property
+    def forecast_steps(self) -> list[int]:
+        """Query available forecast steps in this zarr store."""
+        # assume stream/samples/forecast_steps are orthogonal
+        _, example_sample = next(self.data_root.groups())
+        _, example_stream = next(example_sample.groups())
+
+        all_steps = list(example_stream.group_keys())
+        if self.forecast_offset == 1:
+            return all_steps[1:]  # exclude fstep with no targets/preds
+        else:
+            return all_steps
+
+
+@dataclasses.dataclass
+class DataCoordinates:
+    times: typing.Any
+    coords: typing.Any
+    geoinfo: typing.Any
+    channels: typing.Any
+    geoinfo_channels: typing.Any
+
+
+@dataclasses.dataclass
+class OutputBatchData:
+    """Provide convenient access to adapt existing output data structures."""
+
+    # sample, stream, tensor(datapoint, channel+coords)
+    # => datapoints is across all datasets per stream
+    sources: list[list[IOReaderData]]
+
+    # sample
+    source_intervals: list[TimeRange]
+
+    # fstep, stream, redundant dim (size 1), tensor(sample x datapoint, channel)
+    targets: list[list[list]]
+
+    # fstep, stream, redundant dim (size 1), tensor(ens, sample x datapoint, channel)
+    predictions: list[list[list]]
+
+    # fstep, stream, tensor(sample x datapoint, 2 + geoinfos)
+    targets_coords: list[list]
+
+    # fstep, stream, (sample x datapoint)
+    targets_times: list[list[NDArray[DType]]]
+
+    # fstep, stream, redundant dim (size 1)
+    targets_lens: list[list[list[int]]]
+
+    # stream name: index into data (only streams in streams_output)
+    streams: dict[str, int]
+
+    # stream, channel name
+    target_channels: list[list[str]]
+    source_channels: list[list[str]]
+    geoinfo_channels: list[list[str]]
+
+    sample_start: int
+    forecast_offset: int
+
+    @functools.cached_property
+    def samples(self):
+        """Continuous indices of all samples across all batches."""
+
+        # TODO associate samples with the sample idx used for the time window
+        return np.arange(len(self.sources)) + self.sample_start
+
+    @functools.cached_property
+    def forecast_steps(self):
+        """Indices of all forecast steps adjusted by the forecast offset."""
+        # forecast offset should be either 1 for forecasting or 0 for MTM
+        assert self.forecast_offset in (0, 1)
+        return np.arange(len(self.targets) + self.forecast_offset)
+
+    def items(self) -> typing.Generator[OutputItem, None, None]:
+        """Iterate over possible output items."""
+        # TODO: filter for empty items?
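+        # Illustrative: with samples [0, 1], forecast_steps [1, 2] and a single stream
+        # "ERA5", the product below yields the keys (0, 1, "ERA5"), (0, 2, "ERA5"),
+        # (1, 1, "ERA5"), (1, 2, "ERA5"), i.e. one OutputItem per combination.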
+ for s, fo_s, fi_s in itertools.product( + self.samples, self.forecast_steps, self.streams.keys() + ): + yield self.extract(ItemKey(int(s), int(fo_s), fi_s)) + + def extract(self, key: ItemKey) -> OutputItem: + """Extract datasets from lists for one output item.""" + _logger.debug(f"extracting subset: {key}") + offset_key = self._offset_key(key) + stream_idx = self.streams[key.stream] + + source_interval = self.source_intervals[offset_key.sample] + _logger.debug( + f"forecast_step: {key.forecast_step} = {offset_key.forecast_step} (rel_step) + " + + f"{self.forecast_offset} (forecast_offset)" + ) + _logger.debug(f"stream: {key.stream} with index: {stream_idx}") + + assert self.forecast_offset in (0, 1) + if key.with_source: + source_dataset = self._extract_sources( + offset_key.sample, stream_idx, key, source_interval + ) + else: + source_dataset = None + + if key.with_target(self.forecast_offset): + target_dataset, prediction_dataset = self._extract_targets_predictions( + stream_idx, offset_key, key, source_interval + ) + else: + target_dataset, prediction_dataset = (None, None) + + return OutputItem( + key=key, + forecast_offset=self.forecast_offset, + source=source_dataset, + target=target_dataset, + prediction=prediction_dataset, + ) + + def _offset_key(self, key: ItemKey): + """ + Correct indices in key to be useable for data extraction. + + `key` contains indices that are adjusted to have better output semantics. + To be useable in extraction these have to be adjusted to bridge the differences + compared to the semantics of the data. + - `sample` is adjusted from a global continous index to a per batch index + - `forecast_step` is adjusted from including `forecast_offset` to indexing + the data (always starts at 0) + """ + return ItemKey( + key.sample - self.sample_start, key.forecast_step - self.forecast_offset, key.stream + ) + + def _extract_targets_predictions(self, stream_idx, offset_key, key, source_interval): + datapoints = self._get_datapoints_per_sample(offset_key, stream_idx) + data_coords = self._extract_coordinates(stream_idx, offset_key, datapoints) + + if (datapoints.stop - datapoints.start) == 0: + target_data = np.zeros((0, len(self.target_channels[stream_idx])), dtype=np.float32) + preds_data = np.zeros((0, len(self.target_channels[stream_idx])), dtype=np.float32) + else: + target_data = self.targets[offset_key.forecast_step][stream_idx][0][datapoints] + preds_data = self.predictions[offset_key.forecast_step][stream_idx][0].transpose( + 1, 2, 0 + )[datapoints] + + assert len(data_coords.channels) == target_data.shape[1], ( + "Number of channel names does not align with target data." + ) + assert len(data_coords.channels) == preds_data.shape[1], ( + "Number of channel names does not align with prediction data." 
+ ) + + target_dataset = OutputDataset( + "target", + key, + source_interval, + target_data, + **dataclasses.asdict(data_coords), + ) + prediction_dataset = OutputDataset( + "prediction", + key, + source_interval, + preds_data, + **dataclasses.asdict(data_coords), + ) + + return target_dataset, prediction_dataset + + def _get_datapoints_per_sample(self, offset_key, stream_idx): + lens = self.targets_lens[offset_key.forecast_step][stream_idx] + + # empty target/prediction + if len(lens) == 0: + start = 0 + n_samples = 0 + else: + start = sum(lens[: offset_key.sample]) + n_samples = lens[offset_key.sample] + + _logger.debug( + f"sample: start:{self.sample_start} rel_idx:{offset_key.sample}" + + f"range:{start}-{start + n_samples}" + ) + + return slice(start, start + n_samples) + + def _extract_coordinates(self, stream_idx, offset_key, datapoints) -> DataCoordinates: + _coords = self.targets_coords[offset_key.forecast_step][stream_idx][datapoints].numpy() + + # ensure _coords has size (?,2) + if len(_coords) == 0: + _coords = np.zeros((0, 2), dtype=np.float32) + + coords = _coords[..., :2] # first two columns are lat,lon + geoinfo = _coords[..., 2:] # the rest is geoinfo => potentially empty + if geoinfo.size > 0: # TODO: set geoinfo to be empty for now + geoinfo = np.empty((geoinfo.shape[0], 0)) + _logger.warning( + "geoinformation channels are not implemented yet." + + "will be truncated to be of size 0." + ) + times = self.targets_times[offset_key.forecast_step][stream_idx][ + datapoints + ] # make conversion to datetime64[ns] here? + channels = self.target_channels[stream_idx] + geoinfo_channels = self.geoinfo_channels[stream_idx] + + return DataCoordinates(times, coords, geoinfo, channels, geoinfo_channels) + + def _extract_sources( + self, sample: int, stream_idx: int, key: ItemKey, source_interval: TimeRange + ) -> OutputDataset: + channels = self.source_channels[stream_idx] + geoinfo_channels = self.geoinfo_channels[stream_idx] + + source: IOReaderData = self.sources[sample][stream_idx] + + assert source.data.shape[1] == len(channels), ( + "Number of source channel names does not align with source data" + ) + + source_dataset = OutputDataset( + "source", + key, + source_interval, + np.asarray(source.data), + np.asarray(source.datetimes), + np.asarray(source.coords), + np.asarray(source.geoinfos), + channels, + geoinfo_channels, + ) + + _logger.debug(f"source shape: {source_dataset.data.shape}") + + return source_dataset diff --git a/packages/common/src/weathergen/common/platform_env.py b/packages/common/src/weathergen/common/platform_env.py new file mode 100644 index 000000000..485969588 --- /dev/null +++ b/packages/common/src/weathergen/common/platform_env.py @@ -0,0 +1,38 @@ +""" +Platform environment configuration for WeatherGenerator. + +These are loaded from secrets in the private repository. +""" + +import importlib +import importlib.util +from functools import lru_cache +from typing import Protocol + +from weathergen.common.config import _REPO_ROOT + + +class PlatformEnv(Protocol): + """ + Interface for platform environment configuration. + """ + + def get_hpc(self) -> str | None: ... + + def get_hpc_user(self) -> str | None: ... + + def get_hpc_config(self) -> str | None: ... + + def get_hpc_certificate(self) -> str | None: ... + + +@lru_cache(maxsize=1) +def get_platform_env() -> PlatformEnv: + """ + Loads the platform environment module from the private repository. 
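+
+    Usage sketch (illustrative):
+        env = get_platform_env()
+        hpc_name = env.get_hpc()  # any method of the PlatformEnv protocol above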
+ """ + env_script_path = _REPO_ROOT.parent / "WeatherGenerator-private" / "hpc" / "platform-env.py" + spec = importlib.util.spec_from_file_location("platform_env", env_script_path) + platform_env = importlib.util.module_from_spec(spec) + spec.loader.exec_module(platform_env) # type: ignore + return platform_env # type: ignore diff --git a/packages/evaluate/pyproject.toml b/packages/evaluate/pyproject.toml new file mode 100644 index 000000000..862358e5d --- /dev/null +++ b/packages/evaluate/pyproject.toml @@ -0,0 +1,59 @@ +[project] +name = "weathergen-evaluate" +version = "0.1.0" +description = "The WeatherGenerator Machine Learning Earth System Model" +readme = "../../README.md" +requires-python = ">=3.12,<3.13" +dependencies = [ + "cartopy>=0.24.1", + "xskillscore", + "xhistogram", + "panel", + "omegaconf", + "plotly>=6.2.0", + "weathergen-common", + "weathergen-metrics", +] + +[dependency-groups] +dev = [ + "pytest~=8.3.5", + "pytest-mock>=3.14.1", + "ruff==0.9.7", + "pyrefly==0.36.0", +] + +[project.scripts] +evaluation = "weathergen.evaluate.run_evaluation:evaluate" +export = "weathergen.evaluate.export_inference:export" + +# The linting configuration +[tool.ruff] +extend = "../../pyproject.toml" + +[tool.pyrefly] +project-includes = ["src/"] +project-excludes = [ +] + +[tool.pyrefly.errors] +bad-argument-type = false +unsupported-operation = false +missing-attribute = false +no-matching-overload = false + +# To do: +bad-assignment = false +bad-return = false +index-error = false +not-iterable = false +not-callable = false + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/weathergen"] + + diff --git a/packages/evaluate/src/weathergen/evaluate/__init__.py b/packages/evaluate/src/weathergen/evaluate/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/evaluate/src/weathergen/evaluate/clim_utils.py b/packages/evaluate/src/weathergen/evaluate/clim_utils.py new file mode 100644 index 000000000..7ff75986f --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/clim_utils.py @@ -0,0 +1,226 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +import logging + +import numpy as np +import pandas as pd +import xarray as xr +from scipy.spatial import cKDTree +from tqdm import tqdm + +_logger = logging.getLogger(__name__) + + +def match_climatology_time(target_datetime: pd.Timestamp, clim_data: xr.Dataset) -> int | None: + """ + Find matching climatology time index for target datetime. 
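+
+    Matching is on (day of year, hour): e.g. a target of 2022-10-10T06 matches any
+    climatology timestamp with dayofyear 283 and hour 6 (illustrative values).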
+ + Parameters + ---------- + target_datetime : pd.Timestamp + Target datetime to match + clim_data : xr.Dataset + Climatology dataset with time dimension + + Returns + ------- + int or None + Matching time index, or None if no match found + """ + # Convert numpy datetime64 to pandas datetime if needed + if isinstance(target_datetime, np.datetime64): + target_datetime = pd.to_datetime(target_datetime) + + target_doy = target_datetime.dayofyear + target_hour = target_datetime.hour + + # EFFICIENT TIME MATCHING using vectorized operations + clim_times = pd.to_datetime(clim_data.time.values) + clim_doys = clim_times.dayofyear + clim_hours = clim_times.hour + + time_matches = (clim_doys == target_doy) & (clim_hours == target_hour) + matching_indices = np.where(time_matches)[0] + + # To Do: leap years and other edge cases + if len(matching_indices) == 0: + _logger.warning( + f"No matching climatology time found for {target_datetime} (DOY: {target_doy}, " + f"Hour: {target_hour})" + f"Please check that climatology data and stream input data filenames match." + ) + return None + else: + # Use first match if multiple exist + if len(matching_indices) > 1: + _logger.debug(f"Found {len(matching_indices)} matching times, using first one") + return matching_indices[0] + + +def build_climatology_indexer(clim_lats: np.typing.NDArray, clim_lons: np.typing.NDArray): + """ + Build a fast KDTree indexer for climatology coordinates. + Returns a function that maps (target_lats, target_lons) -> climatology indices. + """ + # Normalize climatology longitudes once + clim_lons = np.where(clim_lons >= 180, clim_lons - 360, clim_lons) + + # Build KDTree on climatology coordinates + clim_coords = np.column_stack((clim_lats, clim_lons)) + tree = cKDTree(clim_coords) + + def indexer( + target_lats: np.typing.NDArray, target_lons: np.typing.NDArray, tol: float = 1e-5 + ) -> np.typing.NDArray: + target_coords = np.column_stack((target_lats, target_lons)) + dist, idx = tree.query(target_coords, distance_upper_bound=tol) + + # Mark unmatched points as -1 + idx[~np.isfinite(dist)] = -1 + return idx.astype(np.int32) + + return indexer + + +def align_clim_data( + target_output: dict, + clim_data: xr.Dataset, +) -> dict: + """ + Align climatology data with target data structure. 
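+
+    For every forecast step, an all-NaN copy of the target array is created and
+    then filled, sample by sample, with the climatology values matched by valid
+    time (day of year and hour) and by nearest grid coordinates via a KDTree.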
+ """ + # create empty climatology data for each forecast step + aligned_clim_data = {} + for fstep, _ in target_output.items(): + aligned_clim_data[fstep] = xr.DataArray( + np.full_like( + target_output[fstep].values, + np.nan, # Create array with same shape filled with NaNs + ), + coords=target_output[fstep].coords, # Use the same coordinates as target + dims=target_output[fstep].dims, # Use the same dimensions as target + ) + + # Cache for previously computed indices + cached_target_lats = None + cached_target_lons = None + cached_clim_indices = None + + if clim_data is None: + return aligned_clim_data + + # Build KDTree indexer once + clim_lats = clim_data.latitude.values + clim_lons = clim_data.longitude.values + clim_indexer = build_climatology_indexer(clim_lats, clim_lons) + + for fstep, target_data in target_output.items(): + samples = np.unique(target_data.sample.values) + for sample in tqdm(samples, f"Aligning climatology for forecast step {fstep}"): + sel_key = "sample" if "sample" in target_data.dims else "ipoint" + sel_val = ( + sample if "sample" in target_data.dims else (target_data.sample.values == sample) + ) + sel_mask = {sel_key: sel_val} + + timestamp = target_data.sel(sel_mask).valid_time.values[0] + # Prepare climatology data for each sample + matching_time_idx = match_climatology_time(timestamp, clim_data) + + if matching_time_idx is None: + continue + + prepared_clim_data = ( + clim_data.data.isel( + time=matching_time_idx, + ) + .sel( + channels=target_data.channel.values, + ) + .transpose("grid_points", "channels") # dimensions specific to anemoi + ) + target_lats = target_data.loc[sel_mask].lat.values + target_lons = target_data.loc[sel_mask].lon.values + # check if target coords match cached target coords + # if they do, use cached clim_indices + if ( + cached_clim_indices is not None + and np.array_equal(target_lats, cached_target_lats) + and np.array_equal(target_lons, cached_target_lons) + ): + clim_indices = cached_clim_indices + else: + clim_lats = prepared_clim_data.latitude.values + clim_lons = prepared_clim_data.longitude.values + + clim_indices = clim_indexer(target_lats, target_lons) + # Check for unmatched coordinates + unmatched_mask = clim_indices == -1 + if np.any(unmatched_mask): + n_unmatched = np.sum(unmatched_mask) + raise ValueError( + f"Found {n_unmatched} target coordinates with no matching climatology " + f"coordinates. This will cause incorrect ACC calculations. " + f"Check coordinate alignment between target and climatology data." + ) + # Cache the computed indices and target coords + cached_clim_indices = clim_indices + cached_target_lats = target_lats + cached_target_lons = target_lons + + # TODO: generalize to potential variation of grid_point dimension name + clim_values = prepared_clim_data.isel(grid_points=clim_indices).values + try: + if len(samples) > 1: + aligned_clim_data[fstep].loc[sel_mask] = clim_values + else: + aligned_clim_data[fstep] = clim_values + except (ValueError, IndexError) as e: + raise ValueError( + f"Failed to align climatology data with target data for ACC calculation. " + f"This error typically occurs when the number of points per sample varies " + f"between samples. " + f"ACC metric is currently only supported for forecasting data with constant " + f"points per sample. " + f"Please ensure all samples have the same spatial coverage and grid points. 
" + f"Original error: {e}" + ) from e + + return aligned_clim_data + + +def get_climatology(reader, da_tars, stream: str) -> xr.Dataset | None: + """ + Load climatology data if specified in the evaluation configuration. + + Parameters + ---------- + reader : WeatherGenReader + Reader object to access data and configurations + da_tars : dict + Dictionary of target data arrays keyed by forecast step + stream : str + Name of the data stream + Returns + ------- + xr.Dataset or None + Climatology dataset if available, otherwise None + """ + # Get climatology data path from configuration + clim_data_path = reader.get_climatology_filename(stream) + + aligned_clim_data = None + + if clim_data_path is not None: + clim_data = xr.open_dataset(clim_data_path) + _logger.info("Aligning climatological data with target structure...") + aligned_clim_data = align_clim_data(da_tars, clim_data) + + return aligned_clim_data diff --git a/packages/evaluate/src/weathergen/evaluate/derived_channels.py b/packages/evaluate/src/weathergen/evaluate/derived_channels.py new file mode 100644 index 000000000..7811407d7 --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/derived_channels.py @@ -0,0 +1,155 @@ +import logging +import re +from dataclasses import dataclass + +import numpy as np +import xarray as xr + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +@dataclass +class DeriveChannels: + def __init__( + self, + available_channels: np.array, + channels: list, + stream_cfg: dict, + ): + """ + Initializes the DeriveChannels class with necessary configurations for channel derivation. + + Args: + available_channels (np.array): an array of all available channel names + in the datasets (target or pred). + channels (list): A list of channels of interest to be evaluated and/or plotted. + stream_cfg (dict): A dictionary containing the stream configuration settings for + evaluation and plottings. + + Returns: + None + """ + self.available_channels = available_channels + self.channels = channels + self.stream_cfg = stream_cfg + + def calc_xxff_channel(self, da: xr.DataArray, level: str) -> xr.DataArray | None: + """ + Calculate wind speed at xx level ('xxff') from wind components or directly. + Args: + da: xarray DataArray with data + Returns: + xarray: Calculated xxff value, or None if calculation is not possible + """ + + channels = da.channel.values + + if f"{level}si" not in channels: + for suffix in ["u", "v"]: + for name in [ + f"{level}{suffix}", + f"{suffix}_{level}", + f"obsvalue_{suffix}{level}m_0", + ]: + component = da.sel(channel=name) if name in channels else None + if component is not None: + break + if suffix == "u": + u_component = component if component is not None else None + else: + v_component = component if component is not None else None + if not (u_component is None or v_component is None): + ff = np.sqrt(u_component**2 + v_component**2) + return ff + else: + _logger.debug( + f"u or v not found for level {level} - skipping {level}ff calculation" + ) + return None + elif f"{level}si" in channels: + ff = da.sel(channel=f"{level}si") + return ff + else: + _logger.debug(f"Skipping {level}ff calculation - unsupported data format") + return None + + def get_channel(self, data_tars, data_preds, tag, level, calc_func) -> None: + """ + Add a new channel data to both target and prediction datasets. + + This method computes new channel values using given calculations methods + and appends them as a new channel to both self.data_tars and self.data_preds. 
+        If the calculation returns None, the original datasets are preserved unchanged.
+
+        The method updates:
+        - data_tars: Target dataset with the derived channel added
+        - data_preds: Prediction dataset with the derived channel added
+        - self.channels: Channel list with the new tag added
+
+        Returns:
+            tuple: Updated (data_tars, data_preds)
+        """
+
+        data_updated = []
+
+        for data in [data_tars, data_preds]:
+            new_channel = calc_func(data, level)
+
+            if new_channel is not None:
+                conc = xr.concat(
+                    [
+                        data,
+                        new_channel.expand_dims("channel").assign_coords(channel=[tag]),
+                    ],
+                    dim="channel",
+                )
+
+                data_updated.append(conc)
+
+                self.channels = self.channels + ([tag] if tag not in self.channels else [])
+
+            else:
+                data_updated.append(data)
+
+        data_tars, data_preds = data_updated
+        return data_tars, data_preds
+
+    def get_derived_channels(
+        self,
+        data_tars: xr.DataArray,
+        data_preds: xr.DataArray,
+    ) -> tuple[xr.DataArray, xr.DataArray, list]:
+        """
+        Function to derive channels from available channels in the data
+
+        Parameters:
+        -----------
+        - data_tars: Target dataset
+        - data_preds: Prediction dataset
+
+        Returns:
+        --------
+        - data_tars: Updated target dataset (if channel can be added)
+        - data_preds: Updated prediction dataset (if channel can be added)
+        - self.channels: all the channels of interest
+
+        """
+
+        if "derive_channels" not in self.stream_cfg:
+            return data_tars, data_preds, self.channels
+
+        for tag in self.stream_cfg["derive_channels"]:
+            if tag not in self.available_channels:
+                match = re.search(r"(\d+)", tag)
+                level = match.group() if match else None
+                if tag == f"{level}ff":
+                    data_tars, data_preds = self.get_channel(
+                        data_tars, data_preds, tag, level, self.calc_xxff_channel
+                    )
+            else:
+                _logger.debug(
+                    f"Calculation of {tag} is skipped because it is included "
+                    "in the available channels..."
+                )
+        return data_tars, data_preds, self.channels
diff --git a/packages/evaluate/src/weathergen/evaluate/export/__init__.py b/packages/evaluate/src/weathergen/evaluate/export/__init__.py
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/packages/evaluate/src/weathergen/evaluate/export/__init__.py
@@ -0,0 +1 @@
+
diff --git a/packages/evaluate/src/weathergen/evaluate/export/cf_utils.py b/packages/evaluate/src/weathergen/evaluate/export/cf_utils.py
new file mode 100644
index 000000000..e905015fe
--- /dev/null
+++ b/packages/evaluate/src/weathergen/evaluate/export/cf_utils.py
@@ -0,0 +1,77 @@
+import logging
+from pathlib import Path
+
+import numpy as np
+
+_logger = logging.getLogger(__name__)
+_logger.setLevel(logging.INFO)
+
+
+class CfParser:
+    """
+    Base class for CF parsers.
+    """
+
+    def __init__(self, config, **kwargs):
+        """
+        CF-compliant parser that handles both regular and Gaussian grids.
+        Parameters
+        ----------
+        config : OmegaConf
+            Configuration defining variable mappings and dimension metadata.
+        grid_type : str
+            Type of grid ('regular' or 'gaussian'), passed via **kwargs.
+        """
+
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+        self.config = config
+        self.file_extension = _get_file_extension(self.output_format)
+        self.fstep_hours = np.timedelta64(self.fstep_hours, "h")
+
+    def get_output_filename(self) -> Path:
+        """
+        Generate output filename based on run_id and output directory.
+        """
+        return Path(self.output_dir) / f"{self.run_id}.{self.file_extension}"
+
+    def process_sample(self, fstep_iterator_results: iter, ref_time: np.datetime64):
+        """
+        Process results from get_data_worker: reshape, concatenate, add metadata, and save.
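+        This base implementation is a no-op template; format-specific parsers
+        such as the NetCDF and Quaver parsers below override it.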
+        Parameters
+        ----------
+        fstep_iterator_results : Iterator over results from get_data_worker.
+        ref_time : Forecast reference time for the sample.
+        Returns
+        -------
+        None
+        """
+        pass
+
+
+##########################################
+
+
+# Helpers
+def _get_file_extension(output_format: str) -> str:
+    """
+    Get file extension based on output format.
+
+    Parameters
+    ----------
+    output_format : Output file format ('netcdf' or 'quaver').
+
+    Returns
+    -------
+    File extension as a string.
+    """
+    if output_format == "netcdf":
+        return "nc"
+    elif output_format == "quaver":
+        return "grib"
+    else:
+        raise ValueError(
+            f"Unsupported output format: {output_format}, "
+            "supported formats are ['netcdf', 'quaver']"
+        )
diff --git a/packages/evaluate/src/weathergen/evaluate/export/export_core.py b/packages/evaluate/src/weathergen/evaluate/export/export_core.py
new file mode 100644
index 000000000..a33c1561f
--- /dev/null
+++ b/packages/evaluate/src/weathergen/evaluate/export/export_core.py
@@ -0,0 +1,257 @@
+import logging
+from multiprocessing import Pool
+
+import numpy as np
+import xarray as xr
+from omegaconf import OmegaConf
+from tqdm import tqdm
+
+from weathergen.common.config import get_model_results
+from weathergen.common.io import ZarrIO
+from weathergen.evaluate.export.parser_factory import CfParserFactory
+from weathergen.evaluate.export.reshape import detect_grid_type
+
+_logger = logging.getLogger(__name__)
+_logger.setLevel(logging.INFO)
+
+
+def get_data_worker(args: tuple) -> xr.DataArray:
+    """
+    Worker function to retrieve data for a single sample and forecast step.
+
+    Parameters
+    ----------
+    args : Tuple containing (sample, fstep, run_id, stream, dtype, epoch, rank).
+
+    Returns
+    -------
+    xarray DataArray for the specified sample and forecast step.
+    """
+    sample, fstep, run_id, stream, dtype, epoch, rank = args
+    fname_zarr = get_model_results(run_id, epoch, rank)
+    with ZarrIO(fname_zarr) as zio:
+        out = zio.get_data(sample, stream, fstep)
+        if dtype == "target":
+            data = out.target
+        elif dtype == "prediction":
+            data = out.prediction
+        else:
+            raise ValueError(f"Invalid type: {dtype}. Must be 'target' or 'prediction'.")
+    return data
+
+
+def get_fsteps(fsteps, fname_zarr: str):
+    """
+    Retrieve available forecast steps from the Zarr store and filter
+    based on requested forecast steps.
+
+    Parameters
+    ----------
+    fsteps : list
+        List of requested forecast steps.
+        If None, retrieves all available forecast steps.
+    fname_zarr : str
+        Path to the Zarr store.
+    Returns
+    -------
+    list[int]
+        List of forecast steps to be used for data retrieval.
+    """
+    with ZarrIO(fname_zarr) as zio:
+        zio_forecast_steps = sorted([int(step) for step in zio.forecast_steps])
+    return zio_forecast_steps if fsteps is None else sorted([int(fstep) for fstep in fsteps])
+
+
+def get_samples(samples, fname_zarr: str):
+    """
+    Retrieve available samples from the Zarr store
+    and filter based on requested samples.
+    Parameters
+    ----------
+    samples : list
+        List of requested samples. If None, retrieves all available samples.
+    fname_zarr : str
+        Path to the Zarr store.
+    Returns
+    -------
+    list[int]
+        List of samples to be used for data retrieval.
+    """
+    with ZarrIO(fname_zarr) as zio:
+        zio_samples = sorted([int(sample) for sample in zio.samples])
+        samples = (
+            zio_samples
+            if samples is None
+            else sorted([int(sample) for sample in samples if int(sample) in zio_samples])
+        )
+    return samples
+
+
+def get_channels(channels, stream: str, fname_zarr: str) -> list[str]:
+    """
+    Retrieve available channels from the Zarr store and filter based on requested channels.
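+    Requested channels that are missing from the store are dropped with a
+    warning; e.g. requesting ['t_850', 'bogus'] returns only ['t_850'] when
+    'bogus' is not in the store.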
+ Parameters + ---------- + channels : list + List of requested channels. If None, retrieves all available channels. + stream : str + Stream name to retrieve data for (e.g., 'ERA5'). + fname_zarr : str + Path to the Zarr store. + Returns + ------- + list[str] + List of channels to be used for data retrieval. + """ + with ZarrIO(fname_zarr) as zio: + zio_forecast_steps = sorted([int(step) for step in zio.forecast_steps]) + dummy_out = zio.get_data(0, stream, zio_forecast_steps[0]) + all_channels = dummy_out.target.channels + + if channels is not None: + existing_channels = set(all_channels) & set(channels) + if existing_channels != set(channels): + missing_channels = set(channels) - set(existing_channels) + _logger.warning( + "The following requested channels are" + f"not available in the data and will be skipped: {missing_channels}" + ) + return all_channels if channels is None else list(existing_channels) + + +def get_grid_type(data_type, stream: str, fname_zarr: str) -> str: + """ + Determine the grid type of the data (regular or gaussian). + Parameters + ---------- + data_type : str + Type of data to retrieve ('target' or 'prediction'). + stream : str + Stream name to retrieve data for (e.g., 'ERA5'). + fname_zarr : str + Path to the Zarr store. + Returns + ------- + str + Grid type ('regular' or 'gaussian'). + """ + with ZarrIO(fname_zarr) as zio: + zio_forecast_steps = sorted([int(step) for step in zio.forecast_steps]) + dummy_out = zio.get_data(0, stream, zio_forecast_steps[0]) + data = dummy_out.target if data_type == "target" else dummy_out.prediction + return detect_grid_type(data.as_xarray().squeeze()) + + +# TODO: this will change after restructuring the lead time. +def get_ref_times(fname_zarr, stream, samples, fstep_hours) -> list[np.datetime64]: + """ + Retrieve reference times for the specified samples from the Zarr store. + Parameters + ---------- + fname_zarr : str + Path to the Zarr store. + stream : str + Stream name to retrieve data for (e.g., 'ERA5'). + samples : list + List of samples to process. + fstep_hours : np.timedelta64 + Time difference between forecast steps in hours. + Returns + ------- + list[np.datetime64] + List of reference times corresponding to the samples. + """ + ref_times = [] + with ZarrIO(fname_zarr) as zio: + zio_forecast_steps = sorted([int(step) for step in zio.forecast_steps]) + for sample in samples: + data = zio.get_data(sample, stream, zio_forecast_steps[0]) + data = data.target.as_xarray().squeeze() + ref_time = data.valid_time.values[0] - fstep_hours * int(data.forecast_step.values) + ref_times.append(ref_time) + return ref_times + + +def export_model_outputs(data_type: str, config: OmegaConf, **kwargs) -> None: + """ + Retrieve data from Zarr store and save one sample to each NetCDF file. + Using multiprocessing to speed up data retrieval. + + Parameters + ---------- + data_type: str + Type of data to retrieve ('target' or 'prediction'). + config : OmegaConf + Loaded config for cf_parser function. + + kwargs: + Additional keyword arguments for the parser. + + NOTE: it contains the following parameters: + run_id : str + Run ID to identify the Zarr store. + samples : list + Sample to process + stream : str + Stream name to retrieve data for (e.g., 'ERA5'). + data_type : str + Type of data to retrieve ('target' or 'prediction'). + fsteps : list + List of forecast steps to retrieve. If None, retrieves all available forecast steps. + channels : list + List of channels to retrieve. If None, retrieves all available channels. 
+    n_processes : int
+        Number of parallel processes to use for data retrieval.
+    epoch : int
+        Epoch number to identify the Zarr store.
+    rank : int
+        Rank number to identify the Zarr store.
+    output_dir : str
+        Directory to save the NetCDF files.
+    output_format : str
+        Output file format ('netcdf' or 'quaver').
+
+    """
+    kwargs = OmegaConf.create(kwargs)
+
+    run_id = kwargs.run_id
+    samples = kwargs.samples
+    fsteps = kwargs.fsteps
+    stream = kwargs.stream
+    channels = kwargs.channels
+    n_processes = kwargs.n_processes
+    epoch = kwargs.epoch
+    rank = kwargs.rank
+    fstep_hours = np.timedelta64(kwargs.fstep_hours, "h")
+
+    if data_type not in ["target", "prediction"]:
+        raise ValueError(f"Invalid type: {data_type}. Must be 'target' or 'prediction'.")
+
+    fname_zarr = get_model_results(run_id, epoch, rank)
+    fsteps = get_fsteps(fsteps, fname_zarr)
+    samples = get_samples(samples, fname_zarr)
+    grid_type = get_grid_type(data_type, stream, fname_zarr)
+    channels = get_channels(channels, stream, fname_zarr)
+    ref_times = get_ref_times(fname_zarr, stream, samples, fstep_hours)
+
+    kwargs["grid_type"] = grid_type
+    kwargs["channels"] = channels
+    kwargs["data_type"] = data_type
+
+    with Pool(processes=n_processes, maxtasksperchild=5) as pool:
+        parser = CfParserFactory.get_parser(config=config, **kwargs)
+
+        for s_idx, sample in enumerate(tqdm(samples)):
+            ref_time = ref_times[s_idx]
+
+            step_tasks = [
+                (sample, fstep, run_id, stream, data_type, epoch, rank) for fstep in fsteps
+            ]
+
+            results_iterator = pool.imap_unordered(get_data_worker, step_tasks, chunksize=1)
+
+            parser.process_sample(
+                results_iterator,
+                ref_time=ref_time,
+            )
+
+        pool.terminate()
+        pool.join()
diff --git a/packages/evaluate/src/weathergen/evaluate/export/export_inference.py b/packages/evaluate/src/weathergen/evaluate/export/export_inference.py
new file mode 100755
index 000000000..139375df4
--- /dev/null
+++ b/packages/evaluate/src/weathergen/evaluate/export/export_inference.py
@@ -0,0 +1,209 @@
+#!/usr/bin/env -S uv run
+# /// script
+# dependencies = [
+#   "weathergen-evaluate",
+#   "weathergen-common",
+#   "weathergen"
+# ]
+# [tool.uv.sources]
+# weathergen-evaluate = { path = "../../../../../packages/evaluate" }
+# weathergen-common = { path = "../../../../../packages/common" }
+# weathergen = { path = "../../../../../" }
+# ///
+## Example USAGE: uv run export --run-id grwnhykd --stream ERA5 \
+##   --output-dir /p/home/jusers/owens1/jureca/WeatherGen/test_output1 \
+##   --format netcdf --type prediction target --fsteps 1 --samples 1
+import argparse
+import logging
+import sys
+from pathlib import Path
+
+from omegaconf import OmegaConf
+
+from weathergen.common.config import _REPO_ROOT
+from weathergen.evaluate.export.export_core import export_model_outputs
+
+_logger = logging.getLogger(__name__)
+_logger.setLevel(logging.INFO)
+
+if not _logger.handlers:
+    handler = logging.StreamHandler()
+    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
+    handler.setFormatter(formatter)
+    _logger.addHandler(handler)
+
+
+def parse_args(args: list) -> argparse.Namespace:
+    """
+    Parse command line arguments.
+
+    Parameters
+    ----------
+    args :
+        List of command line arguments.
+
+    Returns
+    -------
+    Parsed command line arguments.
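+
+    Example
+    -------
+    ``--run-id grwnhykd --stream ERA5 --output-dir ./out --format netcdf``
+    parses into a namespace with ``run_id='grwnhykd'`` plus defaults such as
+    ``n_processes=8`` and ``fstep_hours=6``.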
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "--run-id",
+        type=str,
+        help="Zarr folder which contains target and inference results",
+        required=True,
+    )
+
+    parser.add_argument(
+        "--type",
+        type=str,
+        choices=["prediction", "target"],
+        nargs="+",
+        default=["prediction"],
+        help="List of types of data to convert (e.g. prediction target)",
+        required=False,
+    )
+
+    parser.add_argument(
+        "--output-dir",
+        type=str,
+        help="Output directory to save the NetCDF files",
+        required=True,
+    )
+
+    parser.add_argument(
+        "--format",
+        dest="output_format",
+        type=str,
+        choices=["netcdf", "grib", "quaver"],
+        help="Output file format (currently 'netcdf' and 'quaver' are supported)",
+        required=True,
+    )
+
+    parser.add_argument(
+        "--stream",
+        type=str,
+        choices=["ERA5"],
+        help="Stream name to retrieve data for",
+        required=True,
+    )
+
+    parser.add_argument(
+        "--fsteps",
+        type=int,
+        nargs="+",
+        default=None,
+        help="List of forecast steps to retrieve (e.g. 1 2 3). "
+        "If not provided, retrieves all available forecast steps.",
+    )
+
+    parser.add_argument(
+        "--samples",
+        type=int,
+        nargs="+",
+        default=None,
+        help="List of samples to process (e.g. 0 1 2). If not provided, processes all samples.",
+    )
+
+    parser.add_argument(
+        "--channels",
+        type=str,
+        nargs="+",
+        default=None,
+        help="List of channels to retrieve (e.g., 'q_500 t_2m'). "
+        "If not provided, retrieves all available channels.",
+    )
+
+    parser.add_argument(
+        "--n-processes",
+        type=int,
+        default=8,
+        help="Number of parallel processes to use for data retrieval",
+    )
+
+    parser.add_argument(
+        "--fstep-hours",
+        type=int,
+        default=6,
+        help="Time difference between forecast steps in hours (e.g., 6)",
+    )
+
+    parser.add_argument(
+        "--epoch",
+        type=int,
+        default=0,
+        help="Epoch number to identify the Zarr store",
+    )
+
+    parser.add_argument(
+        "--rank",
+        type=int,
+        default=0,
+        help="Rank number to identify the Zarr store",
+    )
+
+    parser.add_argument(
+        "--template",
+        type=str,
+        help="Path to GRIB template file",
+        required=False,
+    )
+
+    parser.add_argument(
+        "--expver",
+        type=str,
+        help="Expver to include in the output filename (i.e. 'iuoo')",
+        required=False,
+    )
+
+    args, unknown_args = parser.parse_known_args(args)
+    if unknown_args:
+        _logger.warning(f"Unknown arguments: {unknown_args}")
+    return args
+
+
+def export() -> None:
+    """
+    Main function to export data from Zarr store to NetCDF files.
+    """
+    # By default, arguments from the command line are read.
+    export_from_args(sys.argv[1:])
+
+
+def export_from_args(args: list) -> None:
+    """
+    Export data from Zarr store to NetCDF files based on command line arguments.
+    Parameters
+    ----------
+    args : List of command line arguments.
+    """
+    args = parse_args(args)
+
+    # Load configuration
+    config_file = Path(_REPO_ROOT, "config/evaluate/config_zarr2cf.yaml")
+    config = OmegaConf.load(config_file)
+    # check config loaded correctly
+    assert len(config["variables"].keys()) > 0, "Config file not loaded correctly"
+
+    kwargs = vars(args).copy()

+    _logger.info(kwargs)
+
+    # Ensure output directory exists
+    out_dir = Path(args.output_dir)
+    out_dir.mkdir(parents=True, exist_ok=True)
+
+    for dtype in args.type:
+        _logger.info(
+            f"Starting processing {dtype} for run ID {args.run_id}. "
+            f"Requested samples: {args.samples}, forecast steps: {args.fsteps}."
+ ) + + export_model_outputs(dtype, config, **kwargs) + + _logger.info(f"Finished processing {dtype} for run ID {args.run_id}.") + + +if __name__ == "__main__": + export() diff --git a/packages/evaluate/src/weathergen/evaluate/export/io_utils.py b/packages/evaluate/src/weathergen/evaluate/export/io_utils.py new file mode 100644 index 000000000..98cdbb04d --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/export/io_utils.py @@ -0,0 +1,67 @@ +import logging +from pathlib import Path + +import numpy as np +import xarray as xr + +from weathergen.common.config import get_model_results +from weathergen.common.io import ZarrIO + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +def output_filename( + prefix: str, + run_id: str, + output_dir: str, + output_format: str, + forecast_ref_time: np.datetime64, +) -> Path: + """ + Generate output filename based on prefix (should refer to type e.g. pred/targ), run_id, sample + index, output directory, format and forecast_ref_time. + + Parameters + ---------- + prefix : Prefix for file name (e.g., 'pred' or 'targ'). + run_id :Run ID to include in the filename. + output_dir : Directory to save the output file. + output_format : Output file format (currently only 'netcdf' supported). + forecast_ref_time : Forecast reference time to include in the filename. + + Returns + ------- + Full path to the output file. + """ + if output_format not in ["netcdf"]: + raise ValueError( + f"Unsupported output format: {output_format}, supported formates are ['netcdf']" + ) + file_extension = "nc" + frt = np.datetime_as_string(forecast_ref_time, unit="h") + out_fname = Path(output_dir) / f"{prefix}_{frt}_{run_id}.{file_extension}" + return out_fname + + +def get_data_worker(args: tuple) -> xr.DataArray: + """ + Worker function to retrieve data for a single sample and forecast step. + + Parameters + ---------- + args : Tuple containing (sample, fstep, run_id, stream, type). + + Returns + ------- + xarray DataArray for the specified sample and forecast step. + """ + sample, fstep, run_id, stream, dtype, epoch, rank = args + fname_zarr = get_model_results(run_id, epoch, rank) + with ZarrIO(fname_zarr) as zio: + out = zio.get_data(sample, stream, fstep) + if dtype == "target": + data = out.target + elif dtype == "prediction": + data = out.prediction + return data diff --git a/packages/evaluate/src/weathergen/evaluate/export/parser_factory.py b/packages/evaluate/src/weathergen/evaluate/export/parser_factory.py new file mode 100644 index 000000000..d248b0c78 --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/export/parser_factory.py @@ -0,0 +1,46 @@ +from omegaconf import OmegaConf + +from weathergen.evaluate.export.cf_utils import CfParser +from weathergen.evaluate.export.parsers.netcdf_parser import NetcdfParser +from weathergen.evaluate.export.parsers.quaver_parser import QuaverParser + + +class CfParserFactory: + """ + Factory class to get appropriate CF parser based on output format. + """ + + @staticmethod + def get_parser(config: OmegaConf, **kwargs) -> CfParser: + """ + Get the appropriate CF parser based on the output format. + + Parameters + ---------- + config : OmegaConf + Configuration defining variable mappings and dimension metadata. + grid_type : str + Type of grid ('regular' or 'gaussian'). + + Returns + ------- + Instance of a CF_Parser subclass. 
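+
+        For example, ``output_format='netcdf'`` yields a ``NetcdfParser`` and
+        ``'quaver'`` a ``QuaverParser``; any other format raises a ``ValueError``.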
+        """
+
+        _parser_map = {
+            "netcdf": NetcdfParser,
+            "quaver": QuaverParser,
+        }
+
+        fmt = kwargs.get("output_format")
+
+        parser_class = _parser_map.get(fmt)
+        if parser_class is None:
+            raise ValueError(f"Unsupported format: {fmt}")
+
+        return parser_class(config, **kwargs)
diff --git a/packages/evaluate/src/weathergen/evaluate/export/parsers/netcdf_parser.py b/packages/evaluate/src/weathergen/evaluate/export/parsers/netcdf_parser.py
new file mode 100644
index 000000000..fa58d90b7
--- /dev/null
+++ b/packages/evaluate/src/weathergen/evaluate/export/parsers/netcdf_parser.py
@@ -0,0 +1,507 @@
+import logging
+from pathlib import Path
+from typing import Any
+
+import numpy as np
+import xarray as xr
+from omegaconf import OmegaConf
+
+from weathergen.evaluate.export.cf_utils import CfParser
+from weathergen.evaluate.export.reshape import find_pl
+
+_logger = logging.getLogger(__name__)
+_logger.setLevel(logging.INFO)
+
+"""
+Usage:
+
+uv run export --run-id ciga1p9c --stream ERA5
+--output-dir ./test_output1
+--format netcdf --samples 1 2 --fsteps 1 2 3
+"""
+
+
+class NetcdfParser(CfParser):
+    """
+    Child class for handling NetCDF output format.
+    """
+
+    def __init__(self, config: OmegaConf, **kwargs):
+        """
+        CF-compliant parser that handles both regular and Gaussian grids.
+
+        Parameters
+        ----------
+        config : OmegaConf
+            Configuration defining variable mappings and dimension metadata.
+        kwargs :
+            Additional parser options (e.g. grid_type, run_id, output_dir),
+            set as attributes on the instance.
+        """
+        for k, v in kwargs.items():
+            setattr(self, k, v)
+
+        super().__init__(config=config, grid_type=self.grid_type)
+
+        self.mapping = config.get("variables", {})
+
+    def process_sample(
+        self,
+        fstep_iterator_results: iter,
+        ref_time: np.datetime64,
+    ):
+        """
+        Process results from get_data_worker: reshape, concatenate, add metadata, and save.
+        Parameters
+        ----------
+        fstep_iterator_results : Iterator over results from get_data_worker.
+        ref_time : Forecast reference time for the sample.
+        Returns
+        -------
+        None
+        """
+        da_fs = []
+
+        for result in fstep_iterator_results:
+            if result is None:
+                continue
+
+            result = result.as_xarray().squeeze()
+            result = result.sel(channel=self.channels)
+            result = self.reshape(result)
+            da_fs.append(result)
+
+        _logger.info(f"Retrieved {len(da_fs)} forecast steps for type {self.data_type}.")
+
+        if da_fs:
+            da_fs = self.concatenate(da_fs)
+            da_fs = self.assign_coords(da_fs, ref_time)
+            da_fs = self.add_attrs(da_fs)
+            da_fs = self.add_metadata(da_fs)
+            self.save(da_fs, ref_time)
+            _logger.info(f"Saved sample data to {self.output_format} in {self.output_dir}.")
+
+    def get_output_filename(self, forecast_ref_time: np.datetime64) -> Path:
+        """
+        Generate output filename based on data type (e.g. prediction/target),
+        run_id, output directory, format and forecast_ref_time.
+
+        Parameters
+        ----------
+        forecast_ref_time : Forecast reference time to include in the filename.
+
+        Returns
+        -------
+        Full path to the output file.
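+
+        For instance, a prediction with reference time 2020-01-01T00 for run id
+        'ciga1p9c' is written to ``<output_dir>/prediction_2020-01-01T00_ciga1p9c.nc``.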
+ """ + + frt = np.datetime_as_string(forecast_ref_time, unit="h") + out_fname = ( + Path(self.output_dir) / f"{self.data_type}_{frt}_{self.run_id}.{self.file_extension}" + ) + return out_fname + + def reshape(self, data: xr.DataArray) -> xr.Dataset: + """ + Reshape dataset while preserving grid structure (regular or Gaussian). + + Parameters + ---------- + data : xr.DataArray + Input data with dimensions (ipoint, channel) + + Returns + ------- + xr.Dataset + Reshaped dataset appropriate for the grid type + """ + grid_type = self.grid_type + + # Original logic + var_dict, pl = find_pl(data.channel.values) + data_vars = {} + + for new_var, old_vars in var_dict.items(): + if len(old_vars) > 1: + data_vars[new_var] = xr.DataArray( + data.sel(channel=old_vars).values, + dims=["ipoint", "pressure_level"], + ) + else: + data_vars[new_var] = xr.DataArray( + data.sel(channel=old_vars[0]).values, + dims=["ipoint"], + ) + + reshaped_dataset = xr.Dataset(data_vars) + reshaped_dataset = reshaped_dataset.assign_coords( + ipoint=data.coords["ipoint"], + pressure_level=pl, + ) + + if grid_type == "regular": + # Use original reshape logic for regular grids + # This is safe for regular grids + reshaped_dataset = reshaped_dataset.set_index( + ipoint=("valid_time", "lat", "lon") + ).unstack("ipoint") + else: + # Use new logic for Gaussian/unstructured grids + reshaped_dataset = reshaped_dataset.set_index(ipoint2=("ipoint", "valid_time")).unstack( + "ipoint2" + ) + # rename ipoint to ncells + reshaped_dataset = reshaped_dataset.rename_dims({"ipoint": "ncells"}) + reshaped_dataset = reshaped_dataset.rename_vars({"ipoint": "ncells"}) + + return reshaped_dataset + + def concatenate( + self, + array_list, + dim="valid_time", + data_vars="minimal", + coords="different", + compat="equals", + combine_attrs="drop", + sortby_dim="valid_time", + ) -> xr.Dataset: + """ + Uses list of pred/target xarray DataArrays to save one sample to a NetCDF file. + + Parameters + ---------- + type_str : str + Type of data ('pred' or 'targ') to include in the filename. + array_list : list of xr.DataArray + List of DataArrays to concatenate. + dim : str, optional + Dimension along which to concatenate. Default is 'valid_time'. + data_vars : str, optional + How to handle data variables during concatenation. Default is 'minimal'. + coords : str, optional + How to handle coordinates during concatenation. Default is 'different'. + compat : str, optional + Compatibility check for variables. Default is 'equals'. + combine_attrs : str, optional + How to combine attributes. Default is 'drop'. + sortby_dim : str, optional + Dimension to sort the final dataset by. Default is 'valid_time'. + + Returns + ------- + xr.Dataset + Concatenated xarray Dataset. + """ + + data = xr.concat( + array_list, + dim=dim, + data_vars=data_vars, + coords=coords, + compat=compat, + combine_attrs=combine_attrs, + ).sortby(sortby_dim) + + return data + + def assign_coords(self, ds: xr.Dataset, reference_time: np.datetime64) -> xr.Dataset: + """ + Assign forecast reference time coordinate to the dataset. + + Parameters + ---------- + ds : xarray Dataset to assign coordinates to. + reference_time : Forecast reference time to assign. + + Returns + ------- + xarray Dataset with assigned forecast reference time coordinate. 
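+
+        Note that ``forecast_period`` is computed as ``forecast_step * fstep_hours``,
+        so forecast step 3 with 6-hourly steps gives a forecast_period of 18 hours.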
+ """ + ds = ds.assign_coords(forecast_ref_time=reference_time) + + if "sample" in ds.coords: + ds = ds.drop_vars("sample") + + n_hours = self.fstep_hours.astype("int64") + ds["forecast_period"] = ds["forecast_step"] * n_hours + + return ds + + def add_attrs(self, ds: xr.Dataset) -> xr.Dataset: + """ + Add CF-compliant attributes to the dataset variables. + + Parameters + ---------- + ds : xarray Dataset to add attributes to. + Returns + ------- + xarray Dataset with CF-compliant variable attributes. + """ + + ds["forecast_period"].attrs = { + "standard_name": "forecast_period", + "long_name": "time since forecast_reference_time", + "units": "hours", + } + + if self.grid_type == "gaussian": + variables = self._attrs_gaussian_grid(ds) + else: + variables = self._attrs_regular_grid(ds) + + dataset = xr.merge(variables.values()) + dataset.attrs = ds.attrs + + return dataset + + def _attrs_gaussian_grid(self, ds: xr.Dataset) -> xr.Dataset: + """ + Assign CF-compliant attributes to variables in a Gaussian grid dataset. + Parameters + ---------- + ds : xr.Dataset + Input dataset. + Returns + ------- + xr.Dataset + Dataset with CF-compliant variable attributes. + """ + variables = {} + + for var_name, da in ds.data_vars.items(): + if var_name in ["lat", "lon"]: + continue + + mapped_info = self.mapping.get(var_name, {}) + mapped_name = mapped_info.get("var", var_name) + + attributes = { + "standard_name": mapped_info.get("std", var_name), + "units": mapped_info.get("std_unit", "unknown"), + "coordinates": "lat lon", + } + + variables[mapped_name] = xr.DataArray( + data=da.values, + dims=list(da.dims), + coords={coord: ds.coords[coord] for coord in da.coords if coord in ds.coords}, + attrs=attributes, + name=mapped_name, + ) + + self._assign_latlon_attrs(ds) + + return variables + + def _attrs_regular_grid(self, ds: xr.Dataset) -> xr.Dataset: + """ + Assign CF-compliant attributes to variables in a regular grid dataset. + Parameters + ---------- + + ds : xr.Dataset + Input dataset. + Returns + ------- + xr.Dataset + Dataset with CF-compliant variable attributes. + """ + variables = {} + dims = self.config.get("dimensions", {}) + ds_attrs = self._assign_dim_attrs(ds, dims) + mapping = self.mapping + + for var_name, da in ds.data_vars.items(): + var_cfg = mapping.get(var_name) + if var_cfg is None: + continue + + dims = ["pressure", "valid_time", "latitude", "longitude"] + if var_cfg.get("level_type") == "sfc": + dims.remove("pressure") + + coords = self._build_coordinate_mapping(ds, var_cfg, ds_attrs) + + attrs = { + "standard_name": var_cfg.get("std", var_name), + "units": var_cfg.get("std_unit", "unknown"), + } + + mapped_name = var_cfg.get("var", var_name) + variables[mapped_name] = xr.DataArray( + data=da.values, + dims=dims, + coords={**coords, "valid_time": ds["valid_time"].values}, + attrs=attrs, + name=mapped_name, + ) + + return variables + + def _assign_latlon_attrs(self, ds: xr.Dataset) -> None: + """Add CF-compliant attributes to lat/lon coordinates if they exist. + Parameters + ---------- + ds : xr.Dataset + Input dataset. 
+ Returns + ------- + None + """ + if "lat" in ds.coords: + ds.coords["lat"].attrs.update( + { + "standard_name": "latitude", + "long_name": "latitude", + "units": "degrees_north", + } + ) + if "lon" in ds.coords: + ds.coords["lon"].attrs.update( + { + "standard_name": "longitude", + "long_name": "longitude", + "units": "degrees_east", + } + ) + + def _assign_dim_attrs( + self, ds: xr.Dataset, dim_cfg: dict[str, Any] + ) -> dict[str, dict[str, str]]: + """ + Assign CF attributes from given config file. + Parameters + ---------- + ds : xr.Dataset + Input dataset. + dim_cfg : Dict[str, Any] + Dimension configuration from mapping. + Returns + ------- + Dict[str, Dict[str, str]]: + Attributes for each dimension. + """ + ds_attrs = {} + + for dim_name, meta in dim_cfg.items(): + wg_name = meta.get("wg", dim_name) + if dim_name in ds.dims and dim_name != wg_name: + ds = ds.rename_dims({dim_name: wg_name}) + + dim_attrs = {"standard_name": meta.get("std", wg_name)} + if meta.get("std_unit"): + dim_attrs["units"] = meta["std_unit"] + ds_attrs[wg_name] = dim_attrs + + return ds_attrs + + def _build_coordinate_mapping( + self, ds: xr.Dataset, var_cfg: dict[str, Any], attrs: dict[str, dict[str, str]] + ) -> dict[str, Any]: + """Create coordinate mapping for a given variable. + Parameters + ---------- + ds : xr.Dataset + Input dataset. + var_cfg : Dict[str, Any] + Variable configuration from mapping. + attrs : Dict[str, Dict[str, str]] + Attributes for dimensions. + Returns + ------- + Dict[str, Any]: + Coordinate mapping for the variable. + """ + coords = {} + coord_map = self.config.get("coordinates", {}).get(var_cfg.get("level_type"), {}) + + for coord, new_name in coord_map.items(): + coords[new_name] = ( + ds.coords[coord].dims, + ds.coords[coord].values, + attrs[new_name], + ) + + return coords + + def _add_grid_attrs(self, ds: xr.Dataset, grid_info: dict | None = None) -> xr.Dataset: + """ + Add Gaussian grid metadata following CF conventions. + + Parameters + ---------- + ds : xr.Dataset + Dataset to add metadata to + grid_info : dict, optional + Dictionary with grid information: + - 'N': Gaussian grid number (e.g., N320) + - 'reduced': Whether it's a reduced Gaussian grid + + Returns + ------- + xr.Dataset + Dataset with added grid metadata + """ + + if self.grid_type != "gaussian": + return ds + + # ds = ds.copy() + # Add grid mapping information + ds.attrs["grid_type"] = "gaussian" + + # If grid info provided, add it + if grid_info: + ds.attrs["gaussian_grid_number"] = grid_info.get("N", "unknown") + ds.attrs["gaussian_grid_type"] = ( + "reduced" if grid_info.get("reduced", False) else "regular" + ) + + return ds + + def add_metadata(self, ds: xr.Dataset) -> xr.Dataset: + """ + Add CF conventions to the dataset attributes. + + Parameters + ---------- + ds : Input xarray Dataset to add conventions to. + Returns + ------- + xarray Dataset with CF conventions added to attributes. + """ + # ds = ds.copy() + ds.attrs["title"] = f"WeatherGenerator Output for {self.run_id} using stream {self.stream}" + ds.attrs["institution"] = "WeatherGenerator Project" + ds.attrs["source"] = "WeatherGenerator v0.0" + ds.attrs["history"] = ( + "Created using the export_inference.py script on " + + np.datetime_as_string(np.datetime64("now"), unit="s") + ) + ds.attrs["Conventions"] = "CF-1.12" + return ds + + def save(self, ds: xr.Dataset, forecast_ref_time: np.datetime64) -> None: + """ + Save the dataset to a NetCDF file. + + Parameters + ---------- + ds : xarray Dataset to save. 
+ data_type : Type of data ('pred' or 'targ') to include in the filename. + forecast_ref_time : Forecast reference time to include in the filename. + + Returns + ------- + None + """ + out_fname = self.get_output_filename(forecast_ref_time) + _logger.info(f"Saving to {out_fname}.") + ds.to_netcdf(out_fname) + _logger.info(f"Saved NetCDF file to {out_fname}.") diff --git a/packages/evaluate/src/weathergen/evaluate/export/parsers/quaver_parser.py b/packages/evaluate/src/weathergen/evaluate/export/parsers/quaver_parser.py new file mode 100644 index 000000000..d54aad4e4 --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/export/parsers/quaver_parser.py @@ -0,0 +1,246 @@ +import logging +from pathlib import Path + +import earthkit.data as ekd +import numpy as np +import pandas as pd +import xarray as xr +from omegaconf import OmegaConf + +from weathergen.evaluate.export.cf_utils import CfParser + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + +""" +Usage: + +uv run export --run-id ciga1p9c --stream ERA5 +--output-dir ./test_output1 +--format quaver --type prediction target +--samples 2 --fsteps 2 +--template "/quaver_templates/aifs_{level_type}_o96_data.grib" +--expver test +""" + + +class QuaverParser(CfParser): + """ + Child class for handling Quaver output format. + """ + + def __init__(self, config: OmegaConf, **kwargs): + """ + Initialize Quaver parser with configuration and additional parameters. + """ + for k, v in kwargs.items(): + setattr(self, k, v) + + if not hasattr(self, "template"): + raise ValueError("Template file must be provided for Quaver format.") + if not hasattr(self, "channels"): + raise ValueError("Channels must be provided for Quaver format.") + if not hasattr(self, "expver"): + raise ValueError("Expver must be provided for Quaver format.") + + super().__init__(config, **kwargs) + + self.template_cache = [] + + self.pl_template = ekd.from_source("file", self.template.format(level_type="pl")) + self.sf_template = ekd.from_source("file", self.template.format(level_type="sfc")) + + self.encoder = ekd.create_encoder("grib") + + self.pl_file = ekd.create_target("file", self.get_output_filename("pl")) + self.sf_file = ekd.create_target("file", self.get_output_filename("sfc")) + + self.mapping = config.get("variables", {}) + + self.template_cache = self.cache_templates() + + def process_sample( + self, + fstep_iterator_results: iter, + ref_time: np.datetime64, + ): + """ + Process results from get_data_worker: reshape, concatenate, add metadata, and save. + Parameters + ---------- + fstep_iterator_results : Iterator over results from get_data_worker. + ref_time : Forecast reference time for the sample. + Returns + ------- + None + """ + for result in fstep_iterator_results: + if result is None: + continue + + result = result.as_xarray().squeeze() + result = result.sel(channel=self.channels) + da_fs = self.assign_coords(result) + + step = np.unique(result.forecast_step.values) + if len(step) != 1: + raise ValueError(f"Expected single step value, got {step}") + + step = int(step[0]) + + sf_fields = [] + pl_fields = [] + for var in self.channels: + _, level, level_type = self.extract_var_info(var) + + _logger.info(f"[Worker] Encoding var={var}, level={level}") + + field_data = da_fs.sel(channel=var) + template_field = self.template_cache.get((var, level), None) + if template_field is None: + _logger.error(f"Template for var={var}, level={level} not found. 
Skipping.") + continue + + metadata = self.get_metadata(ref_time=ref_time, step=step, level=level) + + encoded = self.encoder.encode( + values=field_data.values, template=template_field, metadata=metadata + ) + + field_list = pl_fields if level_type == "pl" else sf_fields + field_list.append(encoded.to_field()) + + self.save(pl_fields, "pl") + self.save(sf_fields, "sfc") + + _logger.info(f"Saved sample data to {self.output_format} in {self.output_dir}.") + + def extract_var_info(self, var: str) -> tuple[str, str, str]: + """ + Extract variable short name, level, and level type from variable string. + Parameters + ---------- + var : str + Variable string (e.g., 'temperature_850'). + Returns + ------- + tuple[str, str, str] + Variable short name, level, and level type. + """ + var_short = var.split("_")[0] if "_" in var else var + level = int(var.split("_")[-1]) if "_" in var else "sfc" + + var_config = self.mapping.get(var_short, {}) + if not var_config: + raise ValueError( + f"Variable '{var} (using: {var_short})' not found in configuration mapping." + ) + + level_type = var_config.get("level_type", "None") + + return var_short, level, level_type + + def cache_templates(self) -> dict[tuple[str, str], object]: + """ + Get the index of the template field for a given variable and level. + + Returns + ------- + Template field matching the variable and level. + + """ + template_cache = {} + for var in self.channels: + var_short, level, level_type = self.extract_var_info(var) + template = self.pl_template if level_type != "sfc" else self.sf_template + + criteria = {"shortName": var_short} + if level_type != "sfc": + criteria["level"] = level # , "step": step} + + matching_messages = template.sel(**criteria) + + if matching_messages: + template_cache[(var, level)] = matching_messages[0] + else: + _logger.error(f"Template field for variable '{var}' at level '{level}' not found.") + + return template_cache + + def get_output_filename(self, level_type: str) -> Path: + """ + Generate output filename. + Parameters + ---------- + data_type : str + Type of data (e.g., 'prediction' or 'target'). + level_type : str + Level type (e.g., 'sfc', 'pl', etc.). + Returns + ------- + Path + Output filename as a Path object. + """ + return ( + Path(self.output_dir) + / f"{self.data_type}_{level_type}_{self.run_id}_{self.expver}.{self.file_extension}" + ) + + def assign_coords(self, data: xr.DataArray) -> xr.DataArray: + """ + Assign forecast reference time coordinate to the dataset. + Parameters + ---------- + data : xr.DataArray + Input data array. + Returns + ------- + xr.DataArray + Data array with assigned coordinates. + """ + + if {"lon", "lat"}.issubset(data.coords): + lons = (data.lon.values + 360) % 360 + data = data.assign_coords(lon=("ipoint", lons)) + order = np.lexsort((data.lon.values, -data.lat.values)) + data = data.isel(ipoint=order) + return data + + def get_metadata( + self, + ref_time: pd.Timestamp, + step: int, + level: str, + ): + """ + Add metadata to the dataset attributes. + """ + + metadata = { + "date": ref_time, + "step": step * self.fstep_hours.astype(int), + "expver": self.expver, + "marsClass": "rd", + } + if level != "sfc": + metadata["level"] = level + return metadata + + def save(self, encoded_fields: list, level_type: str): + """ + Save the dataset to a file. + Parameters + ---------- + encoded_fields : List + List of encoded fields to write. + level_type : str + Level type ('pl' or 'sfc'). 
+ Returns + ------- + None + """ + + file = self.pl_file if level_type == "pl" else self.sf_file + + for field in encoded_fields: + file.write(field) diff --git a/packages/evaluate/src/weathergen/evaluate/export/reshape.py b/packages/evaluate/src/weathergen/evaluate/export/reshape.py new file mode 100644 index 000000000..67e7385ed --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/export/reshape.py @@ -0,0 +1,79 @@ +import logging +import re + +import numpy as np +import xarray as xr + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + +""" +Enhanced functions to handle Gaussian grids when converting from Zarr to NetCDF. +""" + + +def detect_grid_type(data: xr.DataArray) -> str: + """ + Detect whether data is on a regular lat/lon grid or Gaussian grid. + + Parameters + ---------- + data: + input dataset. + + Returns + ------- + str: + String with the grid type. + Supported options at the moment: "unknown", "regular", "gaussian" + """ + if "lat" not in data.coords or "lon" not in data.coords: + return "unknown" + + lats = data.coords["lat"].values + lons = data.coords["lon"].values + + unique_lats = np.unique(lats) + unique_lons = np.unique(lons) + + # Check if all (lat, lon) combinations exist (regular grid) + if len(lats) == len(unique_lats) * len(unique_lons): + lat_lon_pairs = set(zip(lats, lons, strict=False)) + expected_pairs = {(lat, lon) for lat in unique_lats for lon in unique_lons} + if lat_lon_pairs == expected_pairs: + return "regular" + + # Otherwise it's Gaussian (irregular spacing or reduced grid) + return "gaussian" + + +def find_pl(vars: list) -> tuple[dict[str, list[str]], list[int]]: + """ + Find all the pressure levels for each variable using regex and returns a dictionary + mapping variable names to their corresponding pressure levels. + + Parameters + ---------- + vars : list of variable names with pressure levels (e.g.,'q_500','t_2m'). + + Returns + ------- + A tuple containing: + - var_dict: dict + Dictionary mapping variable names to lists of their corresponding pressure levels. + - pl: list of int + List of unique pressure levels found in the variable names. + """ + var_dict = {} + pl = [] + for var in vars: + match = re.search(r"^([a-zA-Z0-9_]+)_(\d+)$", var) + if match: + var_name = match.group(1) + pressure_level = int(match.group(2)) + pl.append(pressure_level) + var_dict.setdefault(var_name, []).append(var) + else: + var_dict.setdefault(var, []).append(var) + pl = list(set(pl)) + return var_dict, pl diff --git a/packages/evaluate/src/weathergen/evaluate/io_reader.py b/packages/evaluate/src/weathergen/evaluate/io_reader.py new file mode 100644 index 000000000..66fb2602d --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/io_reader.py @@ -0,0 +1,961 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
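+#
+# This module provides Reader implementations with a common interface for
+# querying channels, samples, forecast steps and pre-computed scores, e.g.
+# the CSV-backed CsvReader defined below.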
+ +import json +import logging +import re +from dataclasses import dataclass +from pathlib import Path + +import numpy as np +import omegaconf as oc +import pandas as pd +import xarray as xr +from tqdm import tqdm + +from weathergen.common.config import get_shared_wg_path, load_config, load_model_config +from weathergen.common.io import ZarrIO +from weathergen.evaluate.derived_channels import DeriveChannels +from weathergen.evaluate.score_utils import RegionBoundingBox, to_list + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +@dataclass +class ReaderOutput: + """ + Dataclass to hold the output of the Reader.get_data method. + Attributes + ---------- + target : dict[str, xr.Dataset] + Dictionary of xarray Datasets for targets, indexed by forecast step. + prediction : dict[str, xr.Dataset] + Dictionary of xarray Datasets for predictions, indexed by forecast step. + points_per_sample : xr.DataArray | None + xarray DataArray containing the number of points per sample, if `return_counts` is True + """ + + target: dict[str, xr.Dataset] + prediction: dict[str, xr.Dataset] + points_per_sample: xr.DataArray | None + + +@dataclass +class DataAvailability: + """ + Dataclass to hold information about data availability in the input files. + Attributes + ---------- + score_availability: bool + True if the metric file contains the requested combination. + channels: list[str] + List of channels requested + fsteps: list[int] + List of forecast steps requested + samples: list[int] + List of samples requested + """ + + score_availability: bool + channels: list[str] | None + fsteps: list[int] | None + samples: list[int] | None + ensemble: list[str] | None = None + + +class Reader: + def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict[str, str] | None = None): + """ + Generic data reader class. + + Parameters + ---------- + eval_cfg : dir + config with plotting and evaluation options for that run id + run_id : str + run id of the model + private_paths: dict[srt, str] + dictionary of private paths for the supported HPC + """ + self.eval_cfg = eval_cfg + self.run_id = run_id + self.private_paths = private_paths + self.streams = eval_cfg.streams.keys() + # TODO: propagate it to the other functions using global plotting opts + self.global_plotting_options = eval_cfg.get("global_plotting_options", {}) + + # If results_base_dir and model_base_dir are not provided, default paths are used + self.model_base_dir = self.eval_cfg.get("model_base_dir", None) + + self.results_base_dir = self.eval_cfg.get( + "results_base_dir", None + ) # base directory where results will be stored + + def get_stream(self, stream: str): + """ + returns the dictionary associated to a particular stream + + Parameters + ---------- + stream: str + the stream name + + Returns + ------- + dict + the config dictionary associated to that stream + """ + return self.eval_cfg.streams.get(stream, {}) + + def get_samples(self) -> set[int]: + """Placeholder implementation of sample getter. Override in subclass.""" + return set() + + def get_forecast_steps(self) -> set[int]: + """Placeholder implementation forecast step getter. Override in subclass.""" + return set() + + # TODO: get this from config + def get_channels(self, stream: str | None = None) -> list[str]: + """Placeholder implementation channel names getter. Override in subclass.""" + return list() + + def get_ensemble(self, stream: str | None = None) -> list[str]: + """Placeholder implementation ensemble member names getter. 
Override in subclass.""" + return list() + + def is_regular(self, stream: str) -> bool: + """ + Placeholder implementation to check if lat/lon are regularly spaced. + Override in subclass. + """ + return True + + def load_scores(self, stream: str, region: str, metric: str) -> xr.DataArray: + """Placeholder to load pre-computed scores for a given run, stream, metric""" + return None + + def check_availability( + self, + stream: str, + available_data: dict | None = None, + mode: str = "", + ) -> DataAvailability: + """ + Check if requested channels, forecast steps and samples are + i) available in the previously saved metric file if specified (return False otherwise) + ii) available in the source file (e.g. the Zarr file, return error otherwise) + Additionally, if channels, forecast steps or samples is None/'all', it will + i) set the variable to all available vars in source file + ii) return True only if the respective variable contains the same indeces in metric file + and source file (return False otherwise) + + Parameters + ---------- + stream : str + The stream considered. + available_data : dict, optional + The available data loaded from metric file. + Returns + ------- + DataAvailability + A dataclass containing: + - channels: list of channels or None if 'all' + - fsteps: list of forecast steps or None if 'all' + - samples: list of samples or None if 'all' + """ + + # fill info for requested channels, fsteps, samples + requested_data = self._get_channels_fsteps_samples(stream, mode) + + channels = requested_data.channels + fsteps = requested_data.fsteps + samples = requested_data.samples + ensemble = requested_data.ensemble + requested = { + "channel": set(channels) if channels is not None else None, + "fstep": set(fsteps) if fsteps is not None else None, + "sample": set(samples) if samples is not None else None, + "ensemble": set(ensemble) if ensemble is not None else None, + } + + # fill info from available metric file (if provided) + available = { + "channel": ( + set(available_data["channel"].values.ravel()) + if available_data is not None + else set() + ), + "fstep": ( + set(available_data["forecast_step"].values.ravel()) + if available_data is not None + else set() + ), + "sample": ( + set(available_data.coords["sample"].values.ravel()) + if available_data is not None + else set() + ), + "ensemble": ( + set(available_data["ens"].values.ravel()) + if available_data is not None and "ens" in available_data.coords + else set() + ), + } + + # fill info from reader + reader_data = { + "fstep": set(int(f) for f in self.get_forecast_steps()), + "sample": set(int(s) for s in self.get_samples()), + "channel": set(self.get_channels(stream)), + "ensemble": set(self.get_ensemble(stream)), + } + + check_score = True + corrected = False + for name in ["channel", "fstep", "sample", "ensemble"]: + if requested[name] is None: + # Default to all in Zarr + requested[name] = reader_data[name] + # If file with metrics exists, must exactly match + if available_data is not None and reader_data[name] != available[name]: + _logger.info( + f"Requested all {name}s for {mode}, but previous config was a " + "strict subset. Recomputing." + ) + check_score = False + + # Must be subset of Zarr + if not requested[name] <= reader_data[name]: + missing = requested[name] - reader_data[name] + + if name == "ensemble" and "mean" in missing: + missing.remove("mean") + if missing: + _logger.info( + f"Requested {name}(s) {missing} do(es) not exist in Zarr. " + f"Removing missing {name}(s) for {mode}." 
+                    )
+                    requested[name] = requested[name] & reader_data[name]
+                    corrected = True
+
+            # Must be a subset of available_data (if provided)
+            if available_data is not None and not requested[name] <= available[name]:
+                missing = requested[name] - available[name]
+                _logger.info(
+                    f"{name.capitalize()}(s) {missing} missing in previous evaluation. Recomputing."
+                )
+                check_score = False
+
+        if check_score and not corrected:
+            scope = "metric file" if available_data is not None else "Zarr file"
+            _logger.info(
+                f"All checks passed: all channels, samples, fsteps requested for {mode} are "
+                f"present in {scope}."
+            )
+
+        return DataAvailability(
+            score_availability=check_score,
+            channels=sorted(list(requested["channel"])),
+            fsteps=sorted(list(requested["fstep"])),
+            samples=sorted(list(requested["sample"])),
+            ensemble=sorted(list(requested["ensemble"])),
+        )
+
+    def _get_channels_fsteps_samples(self, stream: str, mode: str) -> DataAvailability:
+        """
+        Get channels, fsteps and samples for a given run and stream from the config.
+        Replace 'all' with None.
+
+        Parameters
+        ----------
+        stream: str
+            The stream considered.
+        mode: str
+            if plotting or evaluation mode
+
+        Returns
+        -------
+        DataAvailability
+            A dataclass containing:
+            - channels: list of channels or None if 'all'
+            - fsteps: list of forecast steps or None if 'all'
+            - samples: list of samples or None if 'all'
+        """
+        assert mode == "plotting" or mode == "evaluation", (
+            "get_channels_fsteps_samples:: Mode should be either 'plotting' or 'evaluation'"
+        )
+
+        stream_cfg = self.get_stream(stream)
+        assert stream_cfg.get(mode, False), "Mode does not exist in stream config. Please add it."
+
+        samples = stream_cfg[mode].get("sample", None)
+        fsteps = stream_cfg[mode].get("forecast_step", None)
+        channels = stream_cfg.get("channels", None)
+        ensemble = stream_cfg[mode].get("ensemble", None)
+        if ensemble == "mean":
+            ensemble = ["mean"]
+
+        if isinstance(fsteps, str) and fsteps != "all":
+            assert re.match(r"^\d+-\d+$", fsteps), (
+                "String format for forecast_step in config must be 'digit-digit' or 'all'"
+            )
+            fsteps = list(range(int(fsteps.split("-")[0]), int(fsteps.split("-")[1]) + 1))
+        if isinstance(samples, str) and samples != "all":
+            assert re.match(r"^\d+-\d+$", samples), (
+                "String format for sample in config must be 'digit-digit' or 'all'"
+            )
+            samples = list(range(int(samples.split("-")[0]), int(samples.split("-")[1]) + 1))
+
+        return DataAvailability(
+            score_availability=True,
+            channels=None if (channels == "all" or channels is None) else list(channels),
+            fsteps=None if (fsteps == "all" or fsteps is None) else list(fsteps),
+            samples=None if (samples == "all" or samples is None) else list(samples),
+            ensemble=None if (ensemble == "all" or ensemble is None) else list(ensemble),
+        )
+
+
+##### Helper function for CsvReader ####
+def _rename_channels(data) -> pd.DataFrame:
+    """
+    Rename channel names so that an underscore separates letters and digits,
+    since scores downloaded from Quaver follow a different naming convention.
+    E.g., 'z500' -> 'z_500', 't850' -> 't_850', '2t' -> '2t', '10ff' -> '10ff'
+
+    Parameters
+    ----------
+    data : pd.DataFrame
+        DataFrame with the original channel names as index.
+
+    Returns
+    -------
+    pd.DataFrame
+        Dataset with renamed channel names.
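+
+    Examples
+    --------
+    A minimal sketch of the convention (hypothetical frame with one score column):
+
+    >>> df = pd.DataFrame({"1 day": [0.1, 0.2]}, index=["z500", "2t"])
+    >>> list(_rename_channels(df).index)
+    ['z_500', '2t']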
+    """
+    for name in list(data.index):
+        # If it starts with digits (surface vars like 2t, 10ff) → leave unchanged
+        if re.match(r"^\d", name):
+            continue
+
+        # Otherwise, insert underscore between letters and digits
+        data = data.rename(index={name: re.sub(r"([a-zA-Z])(\d+)", r"\1_\2", name)})
+
+    return data
+
+
+class CsvReader(Reader):
+    """
+    Reader class to read evaluation data from CSV files and convert to xarray DataArray.
+    """
+
+    def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict | None = None):
+        """
+        Initialize the CsvReader.
+
+        Parameters
+        ----------
+        eval_cfg : dict
+            config with plotting and evaluation options for that run id
+        run_id : str
+            run id of the model
+        private_paths: dict
+            dictionary of private paths for the supported HPC systems
+        """
+
+        super().__init__(eval_cfg, run_id, private_paths)
+        self.csv_path = eval_cfg.get("csv_path")
+        assert self.csv_path is not None, "CSV path must be provided in the config."
+
+        pd_data = pd.read_csv(self.csv_path, index_col=0)
+
+        self.data = _rename_channels(pd_data)
+        self.metrics_base_dir = Path(self.csv_path).parent
+        # for backward compatibility allow metric_dir to be specified in the run config
+        self.metrics_dir = Path(
+            self.eval_cfg.get("metrics_dir", self.metrics_base_dir / self.run_id / "evaluation")
+        )
+
+        assert len(eval_cfg.streams.keys()) == 1, "CsvReader only supports one stream."
+        self.stream = list(eval_cfg.streams.keys())[0]
+        self.channels = self.data.index.tolist()
+        self.samples = [0]
+        self.forecast_steps = [int(col.split()[0]) for col in self.data.columns]
+        self.npoints_per_sample = [0]
+        self.epoch = eval_cfg.get("epoch", 0)
+        self.metric = eval_cfg.get("metric")
+        self.region = eval_cfg.get("region")
+
+    def get_samples(self) -> set[int]:
+        """get set of samples for the retrieved scores (initialisation times)"""
+        return set(self.samples)  # Placeholder implementation
+
+    def get_forecast_steps(self) -> set[int]:
+        """get set of forecast steps"""
+        return set(self.forecast_steps)  # Placeholder implementation
+
+    # TODO: get this from config
+    def get_channels(self, stream: str | None = None) -> list[str]:
+        """get set of channels"""
+        assert stream == self.stream, "streams do not match in CsvReader."
+        return list(self.channels)  # Placeholder implementation
+
+    def get_values(self) -> xr.DataArray:
+        """get score values in the right format"""
+        return self.data.values[np.newaxis, :, :, np.newaxis].T
+
+    def load_scores(self, stream: str, region: str, metric: str) -> xr.DataArray:
+        """
+        Load the existing scores for a given run, stream and metric.
+
+        Parameters
+        ----------
+        stream :
+            Stream name.
+        region :
+            Region name.
+        metric :
+            Metric name.
+
+        Returns
+        -------
+        xr.DataArray
+            The metric DataArray.
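+
+        Examples
+        --------
+        Illustrative usage (stream, region and metric names are assumptions):
+
+        >>> da = reader.load_scores("era5", "global", "rmse")
+        >>> da.dims
+        ('sample', 'forecast_step', 'channel', 'metric')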
+        """
+
+        available_data = self.check_availability(stream, mode="evaluation")
+
+        # fill it only for matching metric
+        if metric == self.metric and region == self.region and stream == self.stream:
+            data = self.get_values()
+        else:
+            data = np.full(
+                (
+                    len(available_data.samples),
+                    len(available_data.fsteps),
+                    len(available_data.channels),
+                    1,
+                ),
+                np.nan,
+            )
+
+        da = xr.DataArray(
+            data.astype(np.float32),
+            dims=("sample", "forecast_step", "channel", "metric"),
+            coords={
+                "sample": available_data.samples,
+                "forecast_step": available_data.fsteps,
+                "channel": available_data.channels,
+                "metric": [metric],
+            },
+            attrs={"npoints_per_sample": self.npoints_per_sample},
+        )
+
+        return da
+
+
+class WeatherGenReader(Reader):
+    def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict | None = None):
+        """Data reader class for WeatherGenerator model outputs stored in Zarr format."""
+
+        super().__init__(eval_cfg, run_id, private_paths)
+
+        self.mini_epoch = eval_cfg.mini_epoch
+        self.rank = eval_cfg.rank
+
+        # Load model configuration and set (run-id specific) directories
+        self.inference_cfg = self.get_inference_config()
+
+        if not self.results_base_dir:
+            self.results_base_dir = Path(get_shared_wg_path("results"))
+            _logger.info(f"Results directory obtained from private config: {self.results_base_dir}")
+        else:
+            _logger.info(f"Results directory parsed: {self.results_base_dir}")
+
+        self.runplot_base_dir = Path(
+            self.eval_cfg.get("runplot_base_dir", self.results_base_dir)
+        )  # base directory where map plots and histograms will be stored
+
+        self.metrics_base_dir = Path(
+            self.eval_cfg.get("metrics_base_dir", self.results_base_dir)
+        )  # base directory where score files will be stored
+
+        self.results_dir, self.runplot_dir = (
+            Path(self.results_base_dir) / self.run_id,
+            Path(self.runplot_base_dir) / self.run_id,
+        )
+        # for backward compatibility allow metric_dir to be specified in the run config
+        self.metrics_dir = Path(
+            self.eval_cfg.get("metrics_dir", self.metrics_base_dir / self.run_id / "evaluation")
+        )
+
+        fname_zarr_new = self.results_dir.joinpath(
+            f"validation_chkpt{self.mini_epoch:05d}_rank{self.rank:04d}.zarr"
+        )
+        fname_zarr_old = self.results_dir.joinpath(
+            f"validation_epoch{self.mini_epoch:05d}_rank{self.rank:04d}.zarr"
+        )
+
+        if fname_zarr_new.exists() or fname_zarr_new.is_dir():
+            self.fname_zarr = fname_zarr_new
+        else:
+            self.fname_zarr = fname_zarr_old
+
+        if not self.fname_zarr.exists() or not self.fname_zarr.is_dir():
+            _logger.error(f"Zarr file {self.fname_zarr} does not exist.")
+            raise FileNotFoundError(
+                f"Zarr file {self.fname_zarr} does not exist or is not a directory."
+            )
+
+    def get_inference_config(self):
+        """
+        load the config associated to the inference run (different from the eval_cfg, which
+        contains plotting and evaluation options)
+
+        Returns
+        -------
+        dict
+            configuration file from the inference run
+        """
+        if self.private_paths:
+            _logger.info(
+                f"Loading config for run {self.run_id} from private paths: {self.private_paths}"
+            )
+            config = load_config(self.private_paths, self.run_id, self.mini_epoch)
+        else:
+            _logger.info(
+                f"Loading config for run {self.run_id} from model directory: {self.model_base_dir}"
+            )
+            config = load_model_config(self.run_id, self.mini_epoch, self.model_base_dir)
+
+        if not isinstance(config, dict | oc.DictConfig):
+            _logger.warning("Model config not found. Inference config will be empty.")
+            config = {}
+
+        return config
+
+    def get_data(
+        self,
+        stream: str,
+        region: str = "global",
+        samples: list[int] | None = None,
+        fsteps: list[str] | None = None,
+        channels: list[str] | None = None,
+        ensemble: list[str] | None = None,
+        return_counts: bool = False,
+    ) -> ReaderOutput:
+        """
+        Retrieve prediction and target data for a given run from the Zarr store.
+
+        Parameters
+        ----------
+        stream :
+            Stream name to retrieve data for.
+        region :
+            Region name to retrieve data for. Possible values: "global", "shem", "nhem", "tropics"
+        samples :
+            List of sample indices to retrieve. If None, all samples are retrieved.
+        fsteps :
+            List of forecast steps to retrieve. If None, all forecast steps are retrieved.
+        channels :
+            List of channel names to retrieve. If None, all channels are retrieved.
+        ensemble :
+            List of ensemble members to retrieve. If None, all members are retrieved.
+        return_counts :
+            If True, also return the number of points per sample.
+
+        Returns
+        -------
+        ReaderOutput
+            A dataclass containing:
+            - target: Dictionary of xarray DataArrays for targets, indexed by forecast step.
+            - prediction: Dictionary of xarray DataArrays for predictions, indexed by forecast step.
+            - points_per_sample: xarray DataArray containing the number of points per sample,
+              if `return_counts` is True.
+        """
+
+        bbox = RegionBoundingBox.from_region_name(region)
+
+        with ZarrIO(self.fname_zarr) as zio:
+            stream_cfg = self.get_stream(stream)
+            all_channels = self.get_channels(stream)
+            _logger.info(f"RUN {self.run_id}: Processing stream {stream}...")
+
+            fsteps = self.get_forecast_steps() if fsteps is None else fsteps
+
+            # TODO: Avoid conversion of fsteps and sample to integers (as obtained from the ZarrIO)
+            fsteps = sorted([int(fstep) for fstep in fsteps])
+            samples = samples or sorted([int(sample) for sample in self.get_samples()])
+            channels = channels or stream_cfg.get("channels", all_channels)
+            channels = to_list(channels)
+
+            ensemble = ensemble or self.get_ensemble(stream)
+            ensemble = to_list(ensemble)
+
+            dc = DeriveChannels(
+                all_channels,
+                channels,
+                stream_cfg,
+            )
+
+            da_tars, da_preds = [], []
+
+            if return_counts:
+                points_per_sample = xr.DataArray(
+                    np.full((len(fsteps), len(samples)), np.nan),
+                    coords={"forecast_step": fsteps, "sample": samples},
+                    dims=("forecast_step", "sample"),
+                    name=f"points_per_sample_{stream}",
+                )
+            else:
+                points_per_sample = None
+
+            fsteps_final = []
+
+            for fstep in fsteps:
+                _logger.info(f"RUN {self.run_id} - {stream}: Processing fstep {fstep}...")
+                da_tars_fs, da_preds_fs, pps = [], [], []
+
+                for sample in tqdm(samples, desc=f"Processing {self.run_id} - {stream} - {fstep}"):
+                    out = zio.get_data(sample, stream, fstep)
+                    target, pred = out.target.as_xarray(), out.prediction.as_xarray()
+
+                    if region != "global":
+                        _logger.debug(
+                            f"Applying bounding box mask for region '{region}' to targets "
+                            "and predictions..."
+                        )
+                        target = bbox.apply_mask(target)
+                        pred = bbox.apply_mask(pred)
+
+                    npoints = len(target.ipoint)
+                    pps.append(npoints)
+
+                    if npoints == 0:
+                        _logger.info(
+                            f"Skipping {stream} sample {sample} forecast step: {fstep}. "
+                            "Dataset is empty."
+                        )
+                        continue
+
+                    if ensemble == ["mean"]:
+                        _logger.debug("Averaging over ensemble members.")
+                        pred = pred.mean("ens", keepdims=True)
+                    else:
+                        _logger.debug(f"Selecting ensemble members {ensemble}.")
+                        pred = pred.sel(ens=ensemble)
+
+                    da_tars_fs.append(target.squeeze())
+                    da_preds_fs.append(pred.squeeze())
+
+                if not da_tars_fs:
+                    _logger.info(
+                        f"[{self.run_id} - {stream}] No valid data found for fstep {fstep}."
+                    )
+                    continue
+
+                fsteps_final.append(fstep)
+
+                _logger.debug(
+                    f"Concatenating targets and predictions for stream {stream}, "
+                    f"forecast_step {fstep}..."
+                )
+
+                # faster processing
+                if self.is_regular(stream):
+                    # Efficient concatenation for regular grid
+                    da_preds_fs = _force_consistent_grids(da_preds_fs)
+                    da_tars_fs = _force_consistent_grids(da_tars_fs)
+
+                else:
+                    # Irregular (scatter) case: concatenate over ipoint
+                    da_tars_fs = xr.concat(da_tars_fs, dim="ipoint")
+                    da_preds_fs = xr.concat(da_preds_fs, dim="ipoint")
+
+                    if len(samples) == 1:
+                        _logger.debug("Repeating sample coordinate for single-sample case.")
+                        # assign_coords returns a new object, so the result must be kept
+                        da_tars_fs = da_tars_fs.assign_coords(
+                            sample=(
+                                "ipoint",
+                                np.repeat(da_tars_fs.sample.values, da_tars_fs.sizes["ipoint"]),
+                            )
+                        )
+                        da_preds_fs = da_preds_fs.assign_coords(
+                            sample=(
+                                "ipoint",
+                                np.repeat(da_preds_fs.sample.values, da_preds_fs.sizes["ipoint"]),
+                            )
+                        )
+
+                if set(channels) != set(all_channels):
+                    _logger.debug(
+                        f"Restricting targets and predictions to channels {channels} "
+                        f"for stream {stream}..."
+                    )
+
+                da_tars_fs, da_preds_fs, channels = dc.get_derived_channels(
+                    da_tars_fs, da_preds_fs
+                )
+
+                da_tars_fs = da_tars_fs.sel(channel=channels)
+                da_preds_fs = da_preds_fs.sel(channel=channels)
+
+                da_tars.append(da_tars_fs)
+                da_preds.append(da_preds_fs)
+                if return_counts:
+                    points_per_sample.loc[{"forecast_step": fstep}] = np.array(pps)
+
+        # Mapping by forecast step is safer than relying on list order
+        da_tars = {fstep: da for fstep, da in zip(fsteps_final, da_tars, strict=True)}
+        da_preds = {fstep: da for fstep, da in zip(fsteps_final, da_preds, strict=True)}
+
+        return ReaderOutput(
+            target=da_tars, prediction=da_preds, points_per_sample=points_per_sample
+        )
+
+    ######## reader utils ########
+
+    def get_climatology_filename(self, stream: str) -> str | None:
+        """
+        Get the climatology filename for a given stream from the inference configuration.
+
+        Parameters
+        ----------
+        stream : str
+            Name of the data stream.
+
+        Returns
+        -------
+        str or None
+            Climatology filename if specified, otherwise None.
+        """
+
+        stream_dict = self.get_stream(stream)
+
+        clim_data_path = stream_dict.get("climatology_path", None)
+        if not clim_data_path:
+            clim_base_dir = self.inference_cfg.get("data_path_aux", None)
+
+            clim_fn = next(
+                (
+                    item.get("climatology_filename")
+                    for item in self.inference_cfg["streams"]
+                    if item.get("name") == stream
+                ),
+                None,
+            )
+
+            if clim_base_dir and clim_fn:
+                clim_data_path = Path(clim_base_dir).joinpath(clim_fn)
+            else:
+                _logger.warning(
+                    f"No climatology path specified for stream {stream}. Setting climatology to "
+                    "NaN. Add 'climatology_path' to evaluation config to use metrics like ACC."
+                )
+
+        return clim_data_path
+
+    def get_stream(self, stream: str):
+        """
+        returns the dictionary associated to a particular stream.
+        Returns an empty dictionary if the stream does not exist in the Zarr file.
+
+        Parameters
+        ----------
+        stream: str
+            the stream name
+
+        Returns
+        -------
+        dict
+            the config dictionary associated to that stream
+        """
+        stream_dict = {}
+        with ZarrIO(self.fname_zarr) as zio:
+            if stream in zio.streams:
+                stream_dict = self.eval_cfg.streams.get(stream, {})
+        return stream_dict
+
+    def get_samples(self) -> set[int]:
+        """Get the set of sample indices from the Zarr file."""
+        with ZarrIO(self.fname_zarr) as zio:
+            return set(int(s) for s in zio.samples)
+
+    def get_forecast_steps(self) -> set[int]:
+        """Get the set of forecast steps from the Zarr file."""
+        with ZarrIO(self.fname_zarr) as zio:
+            return set(int(f) for f in zio.forecast_steps)
+
+    def get_channels(self, stream: str) -> list[str]:
+        """
+        Get the list of channels for a given stream from the config.
+
+        Parameters
+        ----------
+        stream : str
+            The name of the stream to get channels for.
+
+        Returns
+        -------
+        list[str]
+            A list of channel names.
+        """
+        _logger.debug(f"Getting channels for stream {stream}...")
+        all_channels = self.get_inference_stream_attr(stream, "val_target_channels")
+        _logger.debug(f"Channels found in config: {all_channels}")
+        return all_channels
+
+    def get_ensemble(self, stream: str | None = None) -> list[str]:
+        """Get the list of ensemble member names for a given stream from the config.
+
+        Parameters
+        ----------
+        stream : str
+            The name of the stream to get ensemble members for.
+
+        Returns
+        -------
+        list[str]
+            A list of ensemble members.
+        """
+        _logger.debug(f"Getting ensembles for stream {stream}...")
+
+        # TODO: improve this to get ensemble from io class
+        with ZarrIO(self.fname_zarr) as zio:
+            dummy = zio.get_data(0, stream, zio.forecast_steps[0])
+            return list(dummy.prediction.as_xarray().coords["ens"].values)
+
+    # TODO: improve this
+    def is_regular(self, stream: str) -> bool:
+        """Check if the latitude and longitude coordinates are regularly spaced for a given stream.
+
+        Parameters
+        ----------
+        stream : str
+            The name of the stream to check.
+
+        Returns
+        -------
+        bool
+            True if the stream is regularly spaced. False otherwise.
+        """
+        _logger.debug(f"Checking regular spacing for stream {stream}...")
+
+        with ZarrIO(self.fname_zarr) as zio:
+            dummy = zio.get_data(0, stream, zio.forecast_steps[0])
+
+            sample_idx = zio.samples[1] if len(zio.samples) > 1 else zio.samples[0]
+            fstep_idx = (
+                zio.forecast_steps[1] if len(zio.forecast_steps) > 1 else zio.forecast_steps[0]
+            )
+            dummy1 = zio.get_data(sample_idx, stream, fstep_idx)
+
+        da = dummy.prediction.as_xarray()
+        da1 = dummy1.prediction.as_xarray()
+
+        if (
+            da["lat"].shape != da1["lat"].shape
+            or da["lon"].shape != da1["lon"].shape
+            or not (
+                np.allclose(sorted(da["lat"].values), sorted(da1["lat"].values))
+                and np.allclose(sorted(da["lon"].values), sorted(da1["lon"].values))
+            )
+        ):
+            _logger.debug("Latitude and/or longitude coordinates are not regularly spaced.")
+            return False
+
+        _logger.debug("Latitude and longitude coordinates are regularly spaced.")
+        return True
+
+    def load_scores(self, stream: str, region: str, metric: str) -> xr.DataArray | None:
+        """
+        Load the pre-computed scores for a given run, stream, metric and epoch.
+
+        Parameters
+        ----------
+        stream :
+            Stream name.
+        region :
+            Region name.
+        metric :
+            Metric name.
+
+        Returns
+        -------
+        xr.DataArray
+            The metric DataArray or None if the file does not exist.
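+
+        Examples
+        --------
+        Illustrative usage (stream, region and metric names are assumptions):
+
+        >>> scores = reader.load_scores("era5", "global", "rmse")
+        >>> if scores is not None:
+        ...     print(scores.dims)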
+        """
+        score_path = (
+            Path(self.metrics_dir)
+            / f"{self.run_id}_{stream}_{region}_{metric}_epoch{self.epoch:05d}.json"
+        )
+        _logger.debug(f"Looking for: {score_path}")
+
+        if score_path.exists():
+            with open(score_path) as f:
+                data_dict = json.load(f)
+            return xr.DataArray.from_dict(data_dict)
+        else:
+            return None
+
+    def get_inference_stream_attr(self, stream_name: str, key: str, default=None):
+        """
+        Get the value of a key for a specific stream from the model config.
+
+        Parameters
+        ----------
+        stream_name: str
+            The name of the stream (e.g. 'ERA5').
+        key: str
+            The key to look up (e.g. 'tokenize_spacetime').
+        default: Optional
+            Value to return if not found (default: None).
+
+        Returns
+        -------
+        The parameter value if found, otherwise the default.
+        """
+        for stream in self.inference_cfg.get("streams", []):
+            if stream.get("name") == stream_name:
+                return stream.get(key, default)
+        return default
+
+
+################### Helper functions ########################
+
+
+def _force_consistent_grids(ref: list[xr.DataArray]) -> xr.DataArray:
+    """
+    Force all samples to share the same ipoint order.
+
+    Parameters
+    ----------
+    ref:
+        List of per-sample DataArrays defined on the same set of points.
+
+    Returns
+    -------
+    xr.DataArray
+        DataArray concatenated over `sample` in which all samples share the same
+        lat, lon and ipoint ordering.
+    """
+
+    # Pick first sample as reference
+    ref_lat = ref[0].lat
+    ref_lon = ref[0].lon
+
+    sort_idx = np.lexsort((ref_lon.values, ref_lat.values))
+    npoints = sort_idx.size
+    aligned = []
+    for a in ref:
+        a_sorted = a.isel(ipoint=sort_idx)
+
+        a_sorted = a_sorted.assign_coords(
+            ipoint=np.arange(npoints),
+            lat=("ipoint", ref_lat.values[sort_idx]),
+            lon=("ipoint", ref_lon.values[sort_idx]),
+        )
+        aligned.append(a_sorted)
+
+    return xr.concat(aligned, dim="sample")
diff --git a/packages/evaluate/src/weathergen/evaluate/plot_utils.py b/packages/evaluate/src/weathergen/evaluate/plot_utils.py
new file mode 100644
index 000000000..361ab73c4
--- /dev/null
+++ b/packages/evaluate/src/weathergen/evaluate/plot_utils.py
@@ -0,0 +1,264 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+import logging
+
+import numpy as np
+
+_logger = logging.getLogger(__name__)
+
+
+def collect_streams(runs: dict):
+    """Get all unique streams across runs, sorted.
+
+    Parameters
+    ----------
+    runs : dict
+        The dictionary containing all run configs.
+
+    Returns
+    -------
+    list
+        sorted list of all available streams
+    """
+    return sorted({s for run in runs.values() for s in run["streams"].keys()})
+
+
+def collect_channels(scores_dict: dict, metric: str, region: str, runs) -> list[str]:
+    """Get all unique channels available for given metric and region across runs.
+
+    Parameters
+    ----------
+    scores_dict : dict
+        The dictionary containing all computed metrics.
+ metric: str + String specifying the metric to plot + region: str + String specifying the region to plot + runs: dict + Dictionary containing the config for all runs + Returns + ------- + list + returns a list with all available channels + """ + channels = set() + if metric not in scores_dict or region not in scores_dict[metric]: + return [] + for _stream, run_data in scores_dict[metric][region].items(): + for run_id in runs: + if run_id not in run_data: + continue + values = run_data[run_id]["channel"].values + channels.update([str(x) for x in np.atleast_1d(values)]) + return list(channels) + + +def plot_metric_region( + metric: str, + region: str, + runs: dict, + scores_dict: dict, + plotter: object, + print_summary: bool, +) -> None: + """Plot data for all streams and channels for a given metric and region. + + Parameters + ---------- + metric: str + String specifying the metric to plot + region: str + String specifying the region to plot + runs: dict + Dictionary containing the config for all runs + scores_dict : dict + The dictionary containing all computed metrics. + plotter: + Plotter object to handle the plotting part + print_summary: bool + Option to print plot values to screen + + """ + streams_set = collect_streams(runs) + channels_set = collect_channels(scores_dict, metric, region, runs) + + for stream in streams_set: + for ch in channels_set: + selected_data, labels, run_ids = [], [], [] + + for run_id, data in scores_dict[metric][region].get(stream, {}).items(): + # skip if channel is missing or contains NaN + if ch not in np.atleast_1d(data.channel.values) or data.isnull().all(): + continue + + selected_data.append(data.sel(channel=ch)) + labels.append(runs[run_id].get("label", run_id)) + run_ids.append(run_id) + + if selected_data: + _logger.info(f"Creating plot for {metric} - {region} - {stream} - {ch}.") + name = "_".join([metric, region] + sorted(set(run_ids)) + [stream, ch]) + plotter.plot( + selected_data, + labels, + tag=name, + x_dim="forecast_step", + y_dim=metric, + print_summary=print_summary, + ) + + +def score_card_metric_region( + metric: str, + region: str, + runs: dict, + scores_dict: dict, + sc_plotter: object, +) -> None: + """ + Create score cards for all streams and channels for a given metric and region. + + Parameters + ---------- + metric: str + String specifying the metric to plot + region: str + String specifying the region to plot + runs: dict + Dictionary containing the config for all runs + scores_dict : dict + The dictionary containing all computed metrics. 
+    sc_plotter:
+        Plotter object to handle the plotting part
+    """
+    streams_set = collect_streams(runs)
+    channels_set = collect_channels(scores_dict, metric, region, runs)
+
+    for stream in streams_set:
+        selected_data, run_ids = [], []
+        channels_common = None
+        for _, data in scores_dict[metric][region].get(stream, {}).items():
+            channels_per_run = []
+            for ch in channels_set:
+                if ch not in np.atleast_1d(data.channel.values) or data.isnull().all():
+                    continue
+                channels_per_run.append(ch)
+
+            if channels_common is None:
+                channels_common = set(channels_per_run)
+            else:
+                channels_common = set(channels_common).intersection(set(channels_per_run))
+
+        if not channels_common:
+            continue
+
+        for run_id, data in scores_dict[metric][region].get(stream, {}).items():
+            selected_data.append(data.sel(channel=list(channels_common)))
+            run_ids.append(run_id)
+
+        if selected_data and len(selected_data) > 1:
+            _logger.info(f"Creating score cards for {metric} - {region} - {stream}.")
+            name = "_".join([metric, region, stream])
+            sc_plotter.plot(selected_data, run_ids, metric, channels_common, name)
+        else:
+            _logger.info(
+                f"Only one run_id under stream: {stream}. Creating score card is skipped..."
+            )
+
+
+def bar_plot_metric_region(
+    metric: str,
+    region: str,
+    runs: dict,
+    scores_dict: dict,
+    br_plotter: object,
+) -> None:
+    """
+    Create bar plots for all streams and run_ids for a given metric and region.
+
+    Parameters
+    ----------
+    metric: str
+        String specifying the metric to plot
+    region: str
+        String specifying the region to plot
+    runs: dict
+        Dictionary containing the config for all runs
+    scores_dict : dict
+        The dictionary containing all computed metrics.
+    br_plotter:
+        Plotter object to handle the plotting part
+    """
+    streams_set = collect_streams(runs)
+    channels_set = collect_channels(scores_dict, metric, region, runs)
+
+    for stream in streams_set:
+        selected_data, run_ids = [], []
+
+        for run_id, data in scores_dict[metric][region].get(stream, {}).items():
+            if data.isnull().all():
+                continue
+            selected_data.append(data)
+            run_ids.append(run_id)
+
+        if selected_data and len(selected_data) > 1:
+            _logger.info(f"Creating bar plots for {metric} - {region} - {stream}.")
+            name = "_".join([metric, region, stream])
+            br_plotter.plot(selected_data, run_ids, metric, channels_set, name)
+        else:
+            _logger.info(
+                f"Only one run_id for ({region}) region under stream: {stream}. "
+                "Creating bar plot is skipped..."
+            )
+
+
+class DefaultMarkerSize:
+    """
+    Utility class for managing default configuration values, such as marker sizes
+    for various data streams.
+    """
+
+    _marker_size_stream = {
+        "era5": 2.5,
+        "imerg": 0.25,
+        "cerra": 0.1,
+    }
+
+    _default_marker_size = 0.5
+
+    @classmethod
+    def get_marker_size(cls, stream_name: str) -> float:
+        """
+        Get the default marker size for a given stream name.
+
+        Parameters
+        ----------
+        stream_name : str
+            The name of the stream.
+
+        Returns
+        -------
+        float
+            The default marker size for the stream.
+        """
+        return cls._marker_size_stream.get(stream_name.lower(), cls._default_marker_size)
+
+    @classmethod
+    def list_streams(cls):
+        """
+        List all streams with defined marker sizes.
+
+        Returns
+        -------
+        list[str]
+            List of stream names.
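+
+        Examples
+        --------
+        Both lookups follow directly from the defaults defined above:
+
+        >>> DefaultMarkerSize.list_streams()
+        ['era5', 'imerg', 'cerra']
+        >>> DefaultMarkerSize.get_marker_size("ERA5")
+        2.5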
+        """
+        return list(cls._marker_size_stream.keys())
diff --git a/packages/evaluate/src/weathergen/evaluate/plotter.py b/packages/evaluate/src/weathergen/evaluate/plotter.py
new file mode 100644
index 000000000..cb15e6f24
--- /dev/null
+++ b/packages/evaluate/src/weathergen/evaluate/plotter.py
@@ -0,0 +1,1420 @@
+import datetime
+import glob
+import logging
+import os
+from pathlib import Path
+
+import cartopy
+import cartopy.crs as ccrs
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import numpy as np
+import omegaconf as oc
+import xarray as xr
+from matplotlib.lines import Line2D
+from PIL import Image
+from scipy.stats import wilcoxon
+
+from weathergen.common.config import _load_private_conf
+from weathergen.evaluate.plot_utils import (
+    DefaultMarkerSize,
+)
+
+work_dir = Path(_load_private_conf(None)["path_shared_working_dir"]) / "assets/cartopy"
+
+cartopy.config["data_dir"] = str(work_dir)
+cartopy.config["pre_existing_data_dir"] = str(work_dir)
+os.environ["CARTOPY_DATA_DIR"] = str(work_dir)
+
+np.seterr(divide="ignore", invalid="ignore")
+
+logging.getLogger("matplotlib.category").setLevel(logging.ERROR)
+
+_logger = logging.getLogger(__name__)
+_logger.setLevel(logging.INFO)
+
+_logger.debug(f"Taking cartopy paths from {work_dir}")
+
+
+class Plotter:
+    """
+    Contains all basic plotting functions.
+    """
+
+    def __init__(self, plotter_cfg: dict, output_basedir: str | Path, stream: str | None = None):
+        """
+        Initialize the Plotter class.
+
+        Parameters
+        ----------
+        plotter_cfg:
+            Configuration dictionary containing basic information for plotting.
+            Expected keys are:
+            - image_format: Format of the saved images (e.g., 'png', 'pdf', etc.)
+            - dpi_val: DPI value for the saved images
+            - fig_size: Size of the figure (width, height) in inches
+            - fps: Frames per second used for animations
+            - plot_subtimesteps: If True, plots are created for each valid time separately
+        output_basedir:
+            Base directory under which the plots will be saved.
+            Expected scheme `/`.
+        stream:
+            Stream identifier for which the plots will be created.
+            It can also be set later via update_data_selection.
+        """
+
+        _logger.info(f"Taking cartopy paths from {work_dir}")
+
+        self.image_format = plotter_cfg.get("image_format")
+        self.dpi_val = plotter_cfg.get("dpi_val")
+        self.fig_size = plotter_cfg.get("fig_size")
+        self.fps = plotter_cfg.get("fps")
+        self.plot_subtimesteps = plotter_cfg.get(
+            "plot_subtimesteps", False
+        )  # True if plots are created for each valid time separately
+        self.run_id = Path(output_basedir).name
+
+        self.out_plot_basedir = Path(output_basedir) / "plots"
+
+        if not os.path.exists(self.out_plot_basedir):
+            _logger.info(f"Creating dir {self.out_plot_basedir}")
+            os.makedirs(self.out_plot_basedir, exist_ok=True)
+
+        self.sample = None
+        self.stream = stream
+        self.fstep = None
+
+        self.select = {}
+
+    def update_data_selection(self, select: dict):
+        """
+        Set the selection for the plots. This will be used to filter the data for plotting.
+
+        Parameters
+        ----------
+        select:
+            Dictionary containing the selection criteria. Expected keys are:
+            - "sample": Sample identifier
+            - "stream": Stream identifier
+            - "forecast_step": Forecast step identifier
+        """
+        self.select = select
+
+        if "sample" not in select:
+            _logger.warning("No sample in the selection. Might lead to unexpected results.")
+        else:
+            self.sample = select["sample"]
+
+        if "stream" not in select:
+            _logger.warning("No stream in the selection. 
Might lead to unexpected results.") + else: + self.stream = select["stream"] + + if "forecast_step" not in select: + _logger.warning("No forecast_step in the selection. Might lead to unexpected results.") + else: + self.fstep = select["forecast_step"] + + return self + + def clean_data_selection(self): + """ + Clean the data selection by resetting all selected values. + """ + self.sample = None + self.stream = None + self.fstep = None + + self.select = {} + return self + + def select_from_da(self, da: xr.DataArray, selection: dict) -> xr.DataArray: + """ + Select data from an xarray DataArray based on given selectors. + + Parameters + ---------- + da: + xarray DataArray to select data from. + selection: + Dictionary of selectors where keys are coordinate names and values are the values to + select. + + Returns + ------- + xarray DataArray with selected data. + """ + for key, value in selection.items(): + if key in da.coords and key not in da.dims: + # Coordinate like 'sample' aligned to another dim + da = da.where(da[key] == value, drop=True) + else: + # Scalar coord or dim coord (e.g., 'forecast_step', 'channel') + da = da.sel({key: value}) + return da + + def create_histograms_per_sample( + self, + target: xr.DataArray, + preds: xr.DataArray, + variables: list, + select: dict, + tag: str = "", + ) -> list[str]: + """ + Plot histogram of target vs predictions for each variable and valid time in the DataArray. + + Parameters + ---------- + target: xr.DataArray + Target sample for a specific (stream, sample, fstep) + preds: xr.DataArray + Predictions sample for a specific (stream, sample, fstep) + variables: list + List of variables to be plotted + select: dict + Selection to be applied to the DataArray + tag: str + Any tag you want to add to the plot + + Returns + ------- + List of plot names for the saved histograms. + """ + plot_names = [] + + self.update_data_selection(select) + + # Basic map output directory for this stream + hist_output_dir = self.out_plot_basedir / self.stream / "histograms" + + if not os.path.exists(hist_output_dir): + _logger.info(f"Creating dir {hist_output_dir}") + os.makedirs(hist_output_dir) + + for var in variables: + select_var = self.select | {"channel": var} + + targ, prd = ( + self.select_from_da(target, select_var), + self.select_from_da(preds, select_var), + ) + + # Remove NaNs + targ = targ.dropna(dim="ipoint") + prd = prd.dropna(dim="ipoint") + assert targ.size > 0, "Data array must not be empty or contain only NAs" + assert prd.size > 0, "Data array must not be empty or contain only NAs" + + if self.plot_subtimesteps: + ntimes_unique = len(np.unique(targ.valid_time)) + _logger.info( + f"Creating histograms for {ntimes_unique} valid times of variable {var}." + ) + + groups = zip(targ.groupby("valid_time"), prd.groupby("valid_time"), strict=False) + else: + _logger.info(f"Plotting histogram for all valid times of {var}") + + groups = [((None, targ), (None, prd))] # wrap once with dummy valid_time + + for (valid_time, targ_t), (_, prd_t) in groups: + if valid_time is not None: + _logger.debug(f"Plotting histogram for {var} at valid_time {valid_time}") + name = self.plot_histogram(targ_t, prd_t, hist_output_dir, var, tag=tag) + plot_names.append(name) + + self.clean_data_selection() + + return plot_names + + def plot_histogram( + self, + target_data: xr.DataArray, + pred_data: xr.DataArray, + hist_output_dir: Path, + varname: str, + tag: str = "", + ) -> str: + """ + Plot a histogram comparing target and prediction data for a specific variable. 
+
+        Parameters
+        ----------
+        target_data: xr.DataArray
+            DataArray containing the target data for the variable.
+        pred_data: xr.DataArray
+            DataArray containing the prediction data for the variable.
+        hist_output_dir: Path
+            Directory where the histogram will be saved.
+        varname: str
+            Name of the variable to be plotted.
+        tag: str
+            Any tag you want to add to the plot.
+
+        Returns
+        -------
+        Name of the saved plot file.
+        """
+
+        # Get common bin edges
+        vals = np.concatenate([target_data, pred_data])
+        bins = np.histogram_bin_edges(vals, bins=50)
+
+        # Plot histograms
+        plt.hist(target_data, bins=bins, alpha=0.7, label="Target")
+        plt.hist(pred_data, bins=bins, alpha=0.7, label="Prediction")
+
+        # set labels and title
+        plt.xlabel(f"Variable: {varname}")
+        plt.ylabel("Frequency")
+        plt.title(
+            f"Histogram of Target and Prediction: {self.stream}, {varname} : "
+            f"fstep = {self.fstep:03}"
+        )
+        plt.legend(frameon=False)
+
+        valid_time = (
+            target_data["valid_time"][0]
+            .values.astype("datetime64[m]")
+            .astype(datetime.datetime)
+            .strftime("%Y-%m-%dT%H%M")
+        )
+
+        # TODO: make this nicer
+        parts = [
+            "histogram",
+            self.run_id,
+            tag,
+            str(self.sample),
+            valid_time,
+            self.stream,
+            varname,
+            str(self.fstep).zfill(3),
+        ]
+        name = "_".join(filter(None, parts))
+
+        fname = hist_output_dir / f"{name}.{self.image_format}"
+        _logger.debug(f"Saving histogram to {fname}")
+        plt.savefig(fname)
+        plt.close()
+
+        return name
+
+    def create_maps_per_sample(
+        self,
+        data: xr.DataArray,
+        variables: list,
+        select: dict,
+        tag: str = "",
+        map_kwargs: dict | None = None,
+    ) -> list[str]:
+        """
+        Plot 2D map for each variable and valid time in the DataArray.
+
+        Parameters
+        ----------
+        data: xr.DataArray
+            DataArray for a specific (stream, sample, fstep)
+        variables: list
+            List of variables to be plotted
+        select: dict
+            Selection to be applied to the DataArray
+        tag: str
+            Any tag you want to add to the plot. Note: This is added to the plot directory.
+        map_kwargs: dict
+            Additional keyword arguments for the map.
+            Known keys are:
+            - marker_size: base size of the marker (default is 1)
+            - scale_marker_size: if True, the marker size will be scaled based on latitude
+              (default is False)
+            - marker: marker style (default is 'o')
+            Unknown keys will be passed to the scatter plot function.
+
+        Returns
+        -------
+        List of plot names for the saved maps.
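+
+        Examples
+        --------
+        Illustrative call (stream, variable and tag names are assumptions):
+
+        >>> plotter.create_maps_per_sample(
+        ...     data,
+        ...     variables=["2t"],
+        ...     select={"sample": 0, "stream": "era5", "forecast_step": 1},
+        ...     tag="prediction",
+        ... )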
+        """
+        self.update_data_selection(select)
+
+        # copy global plotting options, not specific to any variable
+        map_kwargs_global = {
+            key: value
+            for key, value in (map_kwargs or {}).items()
+            if not isinstance(value, oc.DictConfig)
+        }
+
+        # Basic map output directory for this stream
+        map_output_dir = self.get_map_output_dir(tag)
+
+        if not os.path.exists(map_output_dir):
+            _logger.info(f"Creating dir {map_output_dir}")
+            os.makedirs(map_output_dir)
+
+        plot_names = []
+        for var in variables:
+            select_var = self.select | {"channel": var}
+            da = self.select_from_da(data, select_var).compute()
+
+            if self.plot_subtimesteps:
+                ntimes_unique = len(np.unique(da.valid_time))
+                _logger.info(
+                    f"Creating maps for {ntimes_unique} valid times of variable {var} - {tag}"
+                )
+
+                groups = da.groupby("valid_time")
+            else:
+                _logger.info(f"Creating maps for all valid times of {var} - {tag}")
+                groups = [(None, da)]  # single dummy group
+
+            for valid_time, da_t in groups:
+                if valid_time is not None:
+                    _logger.debug(f"Plotting map for {var} at valid_time {valid_time}")
+
+                da_t = da_t.dropna(dim="ipoint")
+                assert da_t.size > 0, "Data array must not be empty or contain only NAs"
+
+                name = self.scatter_plot(
+                    da_t,
+                    map_output_dir,
+                    var,
+                    tag=tag,
+                    # guard against map_kwargs=None before the per-variable lookup
+                    map_kwargs=dict((map_kwargs or {}).get(var, {})) | map_kwargs_global,
+                    title=f"{self.stream}, {var} : fstep = {self.fstep:03} ({valid_time})",
+                )
+                plot_names.append(name)
+
+        self.clean_data_selection()
+
+        return plot_names
+
+    def scatter_plot(
+        self,
+        data: xr.DataArray,
+        map_output_dir: Path,
+        varname: str,
+        tag: str = "",
+        map_kwargs: dict | None = None,
+        title: str | None = None,
+    ):
+        """
+        Plot a 2D map for a data array using scatter plot.
+
+        Parameters
+        ----------
+        data: xr.DataArray
+            DataArray to be plotted
+        map_output_dir: Path
+            Directory where the map will be saved
+        varname: str
+            Name of the variable to be plotted
+        tag: str
+            Any tag you want to add to the plot
+        map_kwargs: dict | None
+            Additional keyword arguments for the map.
+        title: str | None
+            Title for the plot.
+
+        Returns
+        -------
+        Name of the saved plot file.
+        """
+        # check for known keys in map_kwargs
+        map_kwargs_save = map_kwargs.copy() if map_kwargs is not None else {}
+        marker_size_base = map_kwargs_save.pop(
+            "marker_size", DefaultMarkerSize.get_marker_size(self.stream)
+        )
+        scale_marker_size = map_kwargs_save.pop("scale_marker_size", False)
+        marker = map_kwargs_save.pop("marker", "o")
+        vmin = map_kwargs_save.pop("vmin", None)
+        vmax = map_kwargs_save.pop("vmax", None)
+        cmap = plt.get_cmap(map_kwargs_save.pop("colormap", "coolwarm"))
+
+        if isinstance(map_kwargs_save.get("levels", False), oc.listconfig.ListConfig):
+            norm = mpl.colors.BoundaryNorm(
+                map_kwargs_save.pop("levels", None), cmap.N, extend="both"
+            )
+        else:
+            norm = mpl.colors.Normalize(
+                vmin=vmin,
+                vmax=vmax,
+                clip=False,
+            )
+
+        # scale marker size
+        marker_size = marker_size_base
+        if scale_marker_size:
+            marker_size = np.clip(
+                marker_size / np.cos(np.radians(data["lat"])) ** 2,
+                a_max=marker_size * 10.0,
+                a_min=marker_size,
+            )
+
+        # Create figure and axis objects
+        fig = plt.figure(dpi=self.dpi_val)
+        ax = fig.add_subplot(1, 1, 1, projection=ccrs.Robinson())
+        ax.coastlines()
+
+        assert data["lon"].shape == data["lat"].shape == data.shape, (
+            f"Scatter plot:: Data shapes do not match. Shapes: "
+            f"lon {data['lon'].shape}, lat {data['lat'].shape}, data {data.shape}."
+        )
+
+        scatter_plt = ax.scatter(
+            data["lon"],
+            data["lat"],
+            c=data,
+            norm=norm,
+            cmap=cmap,
+            s=marker_size,
+            marker=marker,
+            transform=ccrs.PlateCarree(),
+            linewidths=0.0,  # only markers, avoids aliasing for very small markers
+            **map_kwargs_save,
+        )
+
+        plt.colorbar(scatter_plt, ax=ax, orientation="horizontal", label=f"Variable: {varname}")
+        plt.title(title)
+        ax.set_global()
+        ax.gridlines(draw_labels=False, linestyle="--", color="black", linewidth=1)
+
+        # TODO: make this nicer
+        parts = ["map", self.run_id, tag]
+
+        if self.sample:
+            parts.append(str(self.sample))
+
+        if "valid_time" in data.coords:
+            valid_time = data["valid_time"][0].values
+            if ~np.isnat(valid_time):
+                valid_time = (
+                    valid_time.astype("datetime64[m]")
+                    .astype(datetime.datetime)
+                    .strftime("%Y-%m-%dT%H%M")
+                )
+
+                parts.append(valid_time)
+
+        if self.stream:
+            parts.append(self.stream)
+
+        parts.append(varname)
+
+        if self.fstep is not None:
+            parts.extend(["fstep", f"{self.fstep:03d}"])
+
+        name = "_".join(filter(None, parts))
+        fname = f"{map_output_dir.joinpath(name)}.{self.image_format}"
+
+        _logger.debug(f"Saving map to {fname}")
+        plt.savefig(fname)
+        plt.close()
+
+        return name
+
+    def animation(self, samples, fsteps, variables, select, tag) -> list[str]:
+        """
+        Plot 2D animations for a dataset
+
+        Parameters
+        ----------
+        samples: list
+            List of the samples to be plotted
+        fsteps: list
+            List of the forecast steps to be plotted
+        variables: list
+            List of variables to be plotted
+        select: dict
+            Selection to be applied to the DataArray
+        tag: str
+            Any tag you want to add to the plot
+
+        Returns
+        -------
+        List of paths of the saved animations.
+
+        """
+
+        self.update_data_selection(select)
+        map_output_dir = self.get_map_output_dir(tag)
+
+        # Convert FPS to duration in milliseconds
+        duration_ms = int(1000 / self.fps) if self.fps > 0 else 400
+
+        anim_paths = []
+        for sa in samples:
+            for var in variables:
+                _logger.info(f"Creating animation for {var} sample: {sa} - {tag}")
+                image_paths = []
+                for fstep in fsteps:
+                    # TODO: refactor to avoid code duplication with scatter_plot
+                    parts = [
+                        "map",
+                        self.run_id,
+                        tag,
+                        str(sa),
+                        "*",
+                        self.stream,
+                        var,
+                        "fstep",
+                        str(fstep).zfill(3),
+                    ]
+
+                    name = "_".join(filter(None, parts))
+                    fname = f"{map_output_dir.joinpath(name)}.{self.image_format}"
+
+                    names = glob.glob(fname)
+                    image_paths += names
+
+                if image_paths:
+                    images = [Image.open(path) for path in image_paths]
+                    gif_path = (
+                        f"{map_output_dir}/animation_{self.run_id}_{tag}_{sa}_"
+                        f"{self.stream}_{var}.gif"
+                    )
+                    images[0].save(
+                        gif_path,
+                        save_all=True,
+                        append_images=images[1:],
+                        duration=duration_ms,
+                        loop=0,
+                    )
+                    anim_paths.append(gif_path)
+
+                else:
+                    _logger.warning(f"No images found for animation {var} sample {sa}")
+
+        # return the animations written for all samples and variables,
+        # not just the images found for the last combination
+        return anim_paths
+
+    def get_map_output_dir(self, tag):
+        return self.out_plot_basedir / self.stream / "maps" / tag
+
+
+class LinePlots:
+    def __init__(self, plotter_cfg: dict, output_basedir: str | Path):
+        """
+        Initialize the LinePlots class.
+
+        Parameters
+        ----------
+        plotter_cfg:
+            Configuration dictionary containing basic information for plotting.
+            Expected keys are:
+            - image_format: Format of the saved images (e.g., 'png', 'pdf', etc.)
+            - dpi_val: DPI value for the saved images
+            - fig_size: Size of the figure (width, height) in inches
+            - plot_ensemble:
+                If True, plot ensemble spread if 'ens' dimension is present. Options are:
+                - False: do not plot ensemble spread
+                - "std": plot mean +/- standard deviation
+                - "minmax": plot min-max range
+                - "members": plot individual ensemble members
+        output_basedir:
+            Base directory under which the plots will be saved.
+            Expected scheme `/`.
+        """
+
+        self.image_format = plotter_cfg.get("image_format")
+        self.dpi_val = plotter_cfg.get("dpi_val")
+        self.fig_size = plotter_cfg.get("fig_size")
+        self.log_scale = plotter_cfg.get("log_scale")
+        self.add_grid = plotter_cfg.get("add_grid")
+        self.plot_ensemble = plotter_cfg.get("plot_ensemble", False)
+
+        self.out_plot_dir = Path(output_basedir) / "line_plots"
+        if not os.path.exists(self.out_plot_dir):
+            _logger.info(f"Creating dir {self.out_plot_dir}")
+            os.makedirs(self.out_plot_dir, exist_ok=True)
+
+        _logger.info(f"Saving summary plots to: {self.out_plot_dir}")
+
+    def _check_lengths(self, data: xr.DataArray | list, labels: str | list) -> tuple[list, list]:
+        """
+        Check if the lengths of data and labels match.
+
+        Parameters
+        ----------
+        data:
+            DataArray or list of DataArrays to be plotted
+        labels:
+            Label or list of labels for each dataset
+
+        Returns
+        -------
+        data_list, label_list - lists of data and labels
+        """
+        assert isinstance(data, xr.DataArray | list), (
+            "LinePlots::plot - Data should be of type xr.DataArray or list"
+        )
+        assert isinstance(labels, str | list), (
+            "LinePlots::plot - Labels should be of type str or list"
+        )
+
+        # convert to lists
+        data_list = [data] if isinstance(data, xr.DataArray) else data
+        label_list = [labels] if isinstance(labels, str) else labels
+
+        assert len(data_list) == len(label_list), "LinePlots::plot - Data and Labels do not match"
+
+        return data_list, label_list
+
+    def print_all_points_from_graph(self, fig: plt.Figure) -> None:
+        for ax in fig.get_axes():
+            for line in ax.get_lines():
+                ydata = line.get_ydata()
+                xdata = line.get_xdata()
+                label = line.get_label()
+                _logger.info(f"Summary for {label} plot:")
+                for xi, yi in zip(xdata, ydata, strict=False):
+                    _logger.info(f" x: {xi:.3f}, y: {yi:.3f}")
+                _logger.info("--------------------------")
+        return
+
+    def _plot_ensemble(self, data: xr.DataArray, x_dim: str, label: str) -> None:
+        """
+        Plot ensemble spread for a data array.
+
+        Parameters
+        ----------
+        data: xr.DataArray
+            DataArray to be plotted
+        x_dim: str
+            Dimension to be used for the x-axis.
+        label: str
+            Label for the dataset
+
+        Returns
+        -------
+        None
+        """
+        averaged = data.mean(dim=[dim for dim in data.dims if dim != x_dim], skipna=True).sortby(
+            x_dim
+        )
+
+        lines = plt.plot(
+            averaged[x_dim],
+            averaged.values,
+            label=label,
+            marker="o",
+            linestyle="-",
+        )
+        line = lines[0]
+        color = line.get_color()
+
+        ens = data.mean(
+            dim=[dim for dim in data.dims if dim not in [x_dim, "ens"]], skipna=True
+        ).sortby(x_dim)
+
+        if self.plot_ensemble == "std":
+            std_dev = ens.std(dim="ens", skipna=True).sortby(x_dim)
+            plt.fill_between(
+                averaged[x_dim],
+                (averaged - std_dev).values,
+                (averaged + std_dev).values,
+                label=f"{label} - std dev",
+                color=color,
+                alpha=0.2,
+            )
+
+        elif self.plot_ensemble == "minmax":
+            ens_min = ens.min(dim="ens", skipna=True).sortby(x_dim)
+            ens_max = ens.max(dim="ens", skipna=True).sortby(x_dim)
+
+            plt.fill_between(
+                averaged[x_dim],
+                ens_min.values,
+                ens_max.values,
+                label=f"{label} - min max",
+                color=color,
+                alpha=0.2,
+            )
+
+        elif self.plot_ensemble == "members":
+            for j in range(ens.ens.size):
+                plt.plot(
+                    ens[x_dim],
+                    ens.isel(ens=j).values,
+                    color=color,
+                    alpha=0.2,
+                )
+        else:
+            _logger.warning(
+                f"LinePlot:: Unknown option for plot_ensemble: {self.plot_ensemble}. "
+                "Skipping ensemble plotting."
+            )
+
+    def plot(
+        self,
+        data: xr.DataArray | list,
+        labels: str | list,
+        tag: str = "",
+        x_dim: str = "forecast_step",
+        y_dim: str = "value",
+        print_summary: bool = False,
+        plot_ensemble: str | bool = False,
+    ) -> None:
+        """
+        Plot a line graph comparing multiple datasets.
+
+        Parameters
+        ----------
+        data:
+            DataArray or list of DataArrays to be plotted
+        labels:
+            Label or list of labels for each dataset
+        tag:
+            Tag to be added to the plot title and filename
+        x_dim:
+            Dimension to be used for the x-axis. The code will average over all other dimensions.
+        y_dim:
+            Name of the dimension to be used for the y-axis.
+        print_summary:
+            If True, print a summary of the values from the graph.
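+        plot_ensemble:
+            Currently unused by the method body; ensemble plotting is controlled by
+            the instance-level `plot_ensemble` option set in the constructor.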
+
+        Returns
+        -------
+        None
+        """
+
+        data_list, label_list = self._check_lengths(data, labels)
+
+        assert x_dim in data_list[0].dims, (
+            f"x dimension '{x_dim}' not found in data dimensions {data_list[0].dims}"
+        )
+
+        fig = plt.figure(figsize=(12, 6), dpi=self.dpi_val)
+
+        for i, data in enumerate(data_list):
+            non_zero_dims = [dim for dim in data.dims if dim != x_dim and data[dim].shape[0] > 1]
+
+            if self.plot_ensemble and "ens" in non_zero_dims:
+                _logger.info(f"LinePlot:: Plotting ensemble with option {self.plot_ensemble}.")
+                self._plot_ensemble(data, x_dim, label_list[i])
+            else:
+                if non_zero_dims:
+                    _logger.info(
+                        f"LinePlot:: Found multiple entries for dimensions: {non_zero_dims}. "
+                        "Averaging..."
+                    )
+
+                averaged = data.mean(
+                    dim=[dim for dim in data.dims if dim != x_dim], skipna=True
+                ).sortby(x_dim)
+
+                plt.plot(
+                    averaged[x_dim],
+                    averaged.values,
+                    label=label_list[i],
+                    marker="o",
+                    linestyle="-",
+                )
+
+        xlabel = "".join(c if c.isalnum() else " " for c in x_dim)
+        plt.xlabel(xlabel)
+
+        ylabel = "".join(c if c.isalnum() else " " for c in y_dim)
+        plt.ylabel(ylabel)
+
+        title = "".join(c if c.isalnum() else " " for c in tag)
+        plt.title(title)
+        plt.legend(frameon=False)
+
+        if self.add_grid:
+            plt.grid(True, linestyle="--", color="gray", alpha=0.5)
+
+        if self.log_scale:
+            plt.yscale("log")
+
+        if print_summary:
+            _logger.info(f"Summary values for {tag}")
+            self.print_all_points_from_graph(fig)
+
+        parts = ["compare", tag]
+        name = "_".join(filter(None, parts))
+        plt.savefig(f"{self.out_plot_dir.joinpath(name)}.{self.image_format}")
+        plt.close()
+
+
+class ScoreCards:
+    """
+    Initialize the ScoreCards class.
+
+    Parameters
+    ----------
+    plotter_cfg:
+        Configuration dictionary containing basic information for plotting.
+        Expected keys are:
+        - image_format: Format of the saved images (e.g., 'png', 'pdf', etc.)
+        - dpi_val: DPI value for the saved images
+        - improvement_scale: improvement fraction used to scale the marker size
+          (default is 0.2)
+    output_basedir:
+        Base directory under which the score cards will be saved.
+    """
+
+    def __init__(self, plotter_cfg: dict, output_basedir: str | Path) -> None:
+        self.image_format = plotter_cfg.get("image_format")
+        self.dpi_val = plotter_cfg.get("dpi_val")
+        self.improvement = plotter_cfg.get("improvement_scale", 0.2)
+        self.out_plot_dir = Path(output_basedir) / "score_cards"
+        if not os.path.exists(self.out_plot_dir):
+            _logger.info(f"Creating dir {self.out_plot_dir}")
+            os.makedirs(self.out_plot_dir, exist_ok=True)
+
+    def plot(
+        self,
+        data: list[xr.DataArray],
+        runs: list[str],
+        metric: str,
+        channels: list[str],
+        tag: str,
+    ) -> None:
+        """
+        Plot score cards comparing performance between run_ids against a baseline over channels
+        of interest.
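+        Upward (blue) triangles mark channels where a run improves on the baseline,
+        downward (red) triangles mark degradations, and rectangles flag differences
+        that are significant under a Wilcoxon test (p < 0.05).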
+
+        Parameters
+        ----------
+        data:
+            List of (xarray) DataArrays with the scores (stream, region and metric specific)
+        runs:
+            List containing runs (in str format) to be compared (provided in the config)
+        metric:
+            Metric for which we are plotting
+        channels:
+            List containing channels (in str format) of interest (provided in the config)
+        tag:
+            Tag to be added to the plot title and filename
+        """
+        n_runs, n_vars = len(runs), len(channels)
+        fig, ax = plt.subplots(figsize=(2 * n_runs, 1.2 * n_vars))
+
+        baseline = data[0]
+        skill_models = []
+
+        for run_index in range(1, n_runs):
+            skill_model = 0.0
+            for var_index, var in enumerate(channels):
+                diff, avg_diff, avg_skill = self.compare_models(
+                    data, baseline, run_index, var, metric
+                )
+                skill_model += avg_skill.values
+
+                # Get symbols based on difference and performance as well as coordinates
+                # for the position of the triangles.
+                x, y, alt, color, triangle, size = self.get_plot_symbols(
+                    run_index, var_index, avg_skill, avg_diff, metric
+                )
+
+                ax.scatter(x, y, marker=triangle, color=color, s=size.values, zorder=3)
+
+                # Perform Wilcoxon test
+                stat, p = wilcoxon(diff, alternative=alt)
+
+                # Draw rectangle border for significance
+                if p < 0.05:
+                    lw = 2 if p < 0.01 else 1
+                    rect_color = color
+                    rect = plt.Rectangle(
+                        (x - 0.25, y - 0.25),
+                        0.5,
+                        0.5,
+                        fill=False,
+                        edgecolor=rect_color,
+                        linewidth=lw,
+                        zorder=2,
+                    )
+                    ax.add_patch(rect)
+
+            skill_models.append(skill_model / n_vars)
+
+        # Set axis labels
+        ylabels = [
+            f"{var}\n({baseline.coords['metric'].item().upper()}={baseline.sel(channel=var).mean().values.squeeze():.3f})"
+            for var in channels
+        ]
+        xlabels = [
+            f"{model_name}\nSkill: {skill_models[i]:.3f}" for i, model_name in enumerate(runs[1:])
+        ]
+        ax.set_xticks(np.arange(1, n_runs))
+        ax.set_xticklabels(xlabels, fontsize=10)
+        ax.set_yticks(np.arange(n_vars) + 0.5)
+        ax.set_yticklabels(ylabels, fontsize=10)
+        for label in ax.get_yticklabels():
+            label.set_horizontalalignment("center")
+            label.set_x(-0.17)
+        ax.set_ylabel("Variable", fontsize=14)
+        ax.set_title(
+            f"Model Scorecard vs. Baseline '{runs[0]}'",
+            fontsize=16,
+            pad=20,
+        )
+        for x in np.arange(0.5, n_runs - 1, 1):
+            ax.axvline(x, color="gray", linestyle="--", linewidth=0.5, zorder=0, alpha=0.5)
+        ax.set_xlim(0.5, n_runs - 0.5)
+        ax.set_ylim(0, n_vars)
+
+        legend = [
+            Line2D(
+                [0],
+                [0],
+                marker="^",
+                color="white",
+                label=f"{self.improvement * 100:.0f}% improvement",
+                markerfacecolor="blue",
+                markersize=np.sqrt(200),
+            )
+        ]
+        plt.legend(handles=legend, loc="upper left", bbox_to_anchor=(1.02, 1.0))
+
+        _logger.info(f"Saving scorecards to: {self.out_plot_dir}")
+
+        parts = ["score_card", tag] + runs
+        name = "_".join(filter(None, parts))
+        plt.savefig(
+            f"{self.out_plot_dir.joinpath(name)}.{self.image_format}",
+            bbox_inches="tight",
+            dpi=self.dpi_val,
+        )
+        plt.close(fig)
+
+    def compare_models(
+        self,
+        data: list[xr.DataArray],
+        baseline: xr.DataArray,
+        run_index: int,
+        var: str,
+        metric: str,
+        x_dim="forecast_step",
+    ) -> tuple[xr.DataArray, xr.DataArray, xr.DataArray]:
+        """
+        Compare a model with a baseline model and calculate skill scores.
+
+        Parameters
+        ----------
+        data: list[xr.DataArray]
+            List of all scores in xarray format for each model.
+
+        baseline: xarray DataArray
+            The baseline scores in xarray format.
+
+        run_index: int
+            The order index over the run_ids.
+
+        var: str
+            The specified channel over which we compare.
+
+        x_dim: str
+            The dimension for which an average will not be calculated.
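+
+        metric: str
+            The metric name used to derive the skill score.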
+
+        Returns
+        -------
+        diff: xr.DataArray
+            Difference in scores between baseline and model.
+
+        diff.mean(dim="forecast_step"): xr.DataArray
+            Average difference in scores over all forecast steps between baseline and model.
+
+        skill.mean(dim="forecast_step"): xr.DataArray
+            Average skill scores over all forecast steps between baseline and model.
+        """
+        baseline_var = baseline.sel({"channel": var})
+        data_var = data[run_index].sel({"channel": var})
+
+        baseline_score, model_score = calculate_average_over_dim(x_dim, baseline_var, data_var)
+        diff = baseline_score - model_score
+
+        skill = self.get_skill_score(model_score, baseline_score, metric)
+        return diff, diff.mean(dim=x_dim), skill.mean(dim=x_dim)
+
+    def get_skill_score(
+        self, score_model: xr.DataArray, score_ref: xr.DataArray, metric: str
+    ) -> xr.DataArray:
+        """
+        Calculate skill score comparing a model against a baseline.
+
+        Skill score is defined as: (model_score - baseline_score) / (perfect_score - baseline_score)
+
+        Parameters
+        ----------
+        score_model : xr.DataArray
+            The scores of the model being evaluated
+        score_ref : xr.DataArray
+            The scores of the reference/baseline model
+        metric : str
+            The metric name for which to calculate skill score
+
+        Returns
+        -------
+        xr.DataArray
+            Skill scores comparing model to baseline
+        """
+        perf_score = self.get_perf_score(metric)
+        skill_score = (score_model - score_ref) / (perf_score - score_ref)
+        return skill_score
+
+    def get_perf_score(self, metric: str) -> float:
+        """
+        Get the perfect score for a given metric.
+
+        Perfect scores represent ideal performance:
+        - Error metrics: 0 (lower is better)
+        - Skill/score metrics: 1 (higher is better)
+        - PSNR: 100 (higher is better)
+
+        Parameters
+        ----------
+        metric : str
+            Metric name
+
+        Returns
+        -------
+        float
+            Perfect score for the specified metric
+        """
+        # Metrics where lower values indicate better performance (error metrics)
+        if lower_is_better(metric):
+            return 0.0
+
+        # Metrics where higher values indicate better performance (with specific perfect score)
+        elif metric in ["psnr"]:
+            return 100.0
+
+        # Metrics where higher values indicate better performance (default perfect score)
+        else:
+            return 1.0
+
+    def get_plot_symbols(
+        self,
+        run_index: int,
+        var_index: int,
+        avg_skill: xr.DataArray,
+        avg_diff: xr.DataArray,
+        metric: str,
+    ) -> tuple[int, float, str, str, str, xr.DataArray]:
+        """
+        Determine plot symbol properties based on performance difference.
+
+        Parameters
+        ----------
+        run_index : int
+            Index of the model.
+        var_index : int
+            Index of the variable/channel.
+        avg_skill : xr.DataArray
+            Average skill score of the model.
+        avg_diff : xr.DataArray
+            Average difference between baseline and model.
+        metric : str
+            Metric used for interpretation.
+
+        Returns
+        -------
+        tuple[int, float, str, str, str, xr.DataArray]
+            x, y coordinates, alternative hypothesis, color, triangle symbol, size.
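+
+        Examples
+        --------
+        A sketch with scalar inputs (`sc` is a ScoreCards instance; assumes
+        `lower_is_better("rmse")` is True so a positive difference is an improvement):
+
+        >>> skill = xr.DataArray(0.1)
+        >>> diff = xr.DataArray(0.05)
+        >>> x, y, alt, color, triangle, size = sc.get_plot_symbols(1, 0, skill, diff, "rmse")
+        >>> (x, color, triangle)
+        (1, 'blue', '^')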
+
+ def get_plot_symbols(
+ self,
+ run_index: int,
+ var_index: int,
+ avg_skill: xr.DataArray,
+ avg_diff: xr.DataArray,
+ metric: str,
+ ) -> tuple[int, float, str, str, str, xr.DataArray]:
+ """
+ Determine plot symbol properties based on performance difference.
+
+ Parameters
+ ----------
+ run_index : int
+ Index of the model.
+ var_index : int
+ Index of the variable/channel.
+ avg_skill : xr.DataArray
+ Average skill score of the model.
+ avg_diff : xr.DataArray
+ Average difference between baseline and model.
+ metric : str
+ Metric used for interpretation.
+
+ Returns
+ -------
+ tuple[int, float, str, str, str, xr.DataArray]
+ x, y coordinates, alternative hypothesis, color, triangle symbol, size.
+ """
+ # Conservative choice
+ alt = "two-sided"
+ modus = "different"
+ color = "gray"
+
+ # Determine if diff_mean indicates improvement
+ is_improvement = (avg_diff > 0 and lower_is_better(metric)) or (
+ avg_diff < 0 and not lower_is_better(metric)
+ )
+
+ if is_improvement:
+ alt = "greater"
+ modus = "better"
+ color = "blue"
+ elif avg_diff != 0:
+ alt = "less"
+ modus = "worse"
+ color = "red"
+ # avg_diff == 0 keeps the conservative two-sided defaults
+
+ triangle = "^" if modus == "better" else "v"
+
+ # Triangle coordinates
+ x = run_index
+ # First row is model 1 vs model 0
+ y = var_index + 0.5
+
+ # Size saturates towards 200 as |avg_skill| grows relative to the improvement threshold
+ size = 200 * (1 - (1 / (1 + abs(avg_skill) / self.improvement)))
+
+ return x, y, alt, color, triangle, size
+
+
+class BarPlots:
+ """
+ Bar plots comparing runs against a baseline run.
+
+ Parameters
+ ----------
+ plotter_cfg:
+ Configuration dictionary containing basic information for plotting.
+ Expected keys are:
+ - image_format: Format of the saved images (e.g., 'png', 'pdf', etc.)
+ - dpi_val: Resolution (dots per inch) of the saved images
+ - cmap: Name of the diverging colormap (optional, defaults to 'bwr')
+ output_basedir:
+ Base directory under which the bar plots will be saved.
+ """
+
+ def __init__(self, plotter_cfg: dict, output_basedir: str | Path) -> None:
+ self.image_format = plotter_cfg.get("image_format")
+ self.dpi_val = plotter_cfg.get("dpi_val")
+ self.cmap = plotter_cfg.get("cmap", "bwr")
+ self.out_plot_dir = Path(output_basedir) / "bar_plots"
+ _logger.info(f"Saving bar plots to: {self.out_plot_dir}")
+ if not os.path.exists(self.out_plot_dir):
+ _logger.info(f"Creating dir {self.out_plot_dir}")
+ os.makedirs(self.out_plot_dir, exist_ok=True)
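+
+ # Interpretation sketch for the ratio bars produced below (assumed toy
+ # numbers, not from any run): with a baseline RMSE of 1.0 and a comparison
+ # model RMSE of 0.9 for some channel, the plotted value is 0.9 / 1.0 - 1 = -0.1,
+ # so negative bars indicate an improvement for metrics where lower is better.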
+
+ def plot(
+ self,
+ data: list[xr.DataArray],
+ runs: list[str],
+ metric: str,
+ channels: list[str],
+ tag: str,
+ ) -> None:
+ """
+ Plot (ratio) bar plots comparing performance between different run_ids over channels of
+ interest.
+
+ Parameters
+ ----------
+ data:
+ List of (xarray) DataArrays with the scores (stream, region and metric specific)
+ runs:
+ List containing runs (in str format) to be compared (provided in the config)
+ metric:
+ Metric name
+ channels:
+ List containing channels (in str format) of interest (provided in the config)
+ tag:
+ Tag to be added to the plot title and filename
+ """
+
+ fig, ax = plt.subplots(
+ 1,
+ len(runs) - 1,
+ figsize=(5 * len(runs), 2 * len(channels)),
+ dpi=self.dpi_val,
+ squeeze=False,
+ )
+ ax = ax.flatten()
+
+ for run_index in range(1, len(runs)):
+ ratio_score, channels_per_comparison = self.calc_ratio_per_run_id(
+ data, channels, run_index
+ )
+
+ ax[run_index - 1].barh(
+ np.arange(len(ratio_score)),
+ ratio_score,
+ color=self.colors(ratio_score, metric),
+ align="center",
+ edgecolor="black",
+ linewidth=0.5,
+ )
+ ax[run_index - 1].set_yticks(
+ np.arange(len(ratio_score)), labels=channels_per_comparison
+ )
+ ax[run_index - 1].invert_yaxis()
+ ax[run_index - 1].set_xlabel(
+ f"Relative {data[0].coords['metric'].item().upper()}: "
+ f"Target Model ({runs[run_index]}) / Reference Model ({runs[0]})"
+ )
+
+ _logger.info(f"Saving bar plots to: {self.out_plot_dir}")
+ parts = ["bar_plot_compare", tag] + runs
+ name = "_".join(filter(None, parts))
+ plt.savefig(
+ f"{self.out_plot_dir.joinpath(name)}.{self.image_format}",
+ bbox_inches="tight",
+ dpi=self.dpi_val,
+ )
+ plt.close(fig)
+
+ def calc_ratio_per_run_id(
+ self,
+ data: list[xr.DataArray],
+ channels: list[str],
+ run_index: int,
+ x_dim="channel",
+ ) -> tuple[np.ndarray, list[str]]:
+ """
+ Calculate the score ratio w.r.t. the baseline for each channel of a comparison model.
+
+ Parameters
+ ----------
+ data: list[xr.DataArray]
+ List of all scores for each model in xarray format.
+ channels: list[str]
+ All the available channels.
+ run_index: int
+ The order index over the run_ids.
+ x_dim: str
+ The dimension for which an average will not be calculated.
+
+ Returns
+ -------
+ ratio_score: np.ndarray
+ The (ratio) skill over each channel for a specific model
+ channels_per_comparison: list[str]
+ The common channels over which the baseline and the other model will be compared.
+ """
+ ratio_score = []
+ channels_per_comparison = []
+ for var in channels:
+ if var not in data[0].channel.values or var not in data[run_index].channel.values:
+ continue
+ baseline_var = data[0].sel({"channel": var})
+ data_var = data[run_index].sel({"channel": var})
+ channels_per_comparison.append(var)
+
+ baseline_score, model_score = calculate_average_over_dim(x_dim, baseline_var, data_var)
+
+ ratio_score.append(model_score / baseline_score)
+
+ # Centre on 0 so that negative values mean the comparison model scores lower
+ ratio_score = np.array(ratio_score) - 1
+ return ratio_score, channels_per_comparison
+
+ def colors(self, ratio_score: np.ndarray, metric: str) -> list[tuple]:
+ """
+ Map each ratio score to a color from a diverging colormap: negative values map to
+ blue shades and positive values to red shades for metrics where lower is better,
+ with the mapping reversed otherwise.
+ + Parameters + ---------- + ratio_score: np.array + The (ratio) skill for a specific model + metric: str + The metric of interest + Returns + ---------- + colors: list[tuple] + The color magnitude (blue to red) of the bars in the plots + """ + max_val = np.abs(ratio_score).max() + if lower_is_better(metric): + cmap = plt.get_cmap("bwr") + else: + cmap = plt.get_cmap("bwr_r") + colors = [cmap(0.5 + v / (2 * max_val)) for v in ratio_score] + return colors + + +def calculate_average_over_dim( + x_dim: str, baseline_var: xr.DataArray, data_var: xr.DataArray +) -> tuple[xr.DataArray, xr.DataArray]: + """ + Calculate average over xarray dimensions that are larger than 1. Those might be the + forecast-steps or the samples. + + Parameters + ---------- + xdim: str + The dimension for which an average will not be calculated. + baseline_var: xr.DataArray + xarray DataArray with the scores of the baseline model for a specific channel/variable + data_var: xr.DataArray + xarray DataArray with the scores of the comparison model for a specific channel/variable + + Returns + ------- + baseline_score: xarray DataArray + The baseline average scores over the dimensions not specified by xdim + model_score: xarray DataArray + The model average scores over the dimensions not specified by xdim + """ + non_zero_dims = [ + dim for dim in baseline_var.dims if dim != x_dim and baseline_var[dim].shape[0] > 1 + ] + + if non_zero_dims: + _logger.info( + f"LinePlot:: Found multiple entries for dimensions: {non_zero_dims}. Averaging..." + ) + + baseline_score = baseline_var.mean( + dim=[dim for dim in baseline_var.dims if dim != x_dim], skipna=True + ) + model_score = data_var.mean(dim=[dim for dim in data_var.dims if dim != x_dim], skipna=True) + + return baseline_score, model_score + + +def lower_is_better(metric: str) -> bool: + # Determine whether lower or higher is better + return metric in {"l1", "l2", "mse", "rmse", "vrmse", "bias", "crps", "spread"} diff --git a/packages/evaluate/src/weathergen/evaluate/run_evaluation.py b/packages/evaluate/src/weathergen/evaluate/run_evaluation.py new file mode 100755 index 000000000..3bf198d07 --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/run_evaluation.py @@ -0,0 +1,230 @@ +#!/usr/bin/env -S uv run +# /// script +# dependencies = [ +# "weathergen-evaluate", +# "weathergen-common", +# "weathergen-metrics", +# ] +# [tool.uv.sources] +# weathergen-evaluate = { path = "../../../../../packages/evaluate" } +# /// + +import argparse +import logging +import sys +from collections import defaultdict +from pathlib import Path + +import mlflow +from mlflow.client import MlflowClient +from omegaconf import OmegaConf +from xarray import DataArray + +from weathergen.common.config import _REPO_ROOT +from weathergen.common.platform_env import get_platform_env +from weathergen.evaluate.io_reader import CsvReader, WeatherGenReader +from weathergen.evaluate.plot_utils import collect_channels +from weathergen.evaluate.utils import ( + calc_scores_per_stream, + metric_list_to_json, + plot_data, + plot_summary, +) +from weathergen.metrics.mlflow_utils import ( + MlFlowUpload, + get_or_create_mlflow_parent_run, + log_scores, + setup_mlflow, +) + +_logger = logging.getLogger(__name__) + +_DEFAULT_PLOT_DIR = _REPO_ROOT / "plots" + +_platform_env = get_platform_env() + + +def evaluate() -> None: + # By default, arguments from the command line are read. 
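+ # Typical invocation, assuming the repository layout from this patch
+ # (paths and flags below match this file's argument parser):
+ #   uv run packages/evaluate/src/weathergen/evaluate/run_evaluation.py \
+ #       --config config/evaluate/eval_config.yml [--push-metrics]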
+ evaluate_from_args(sys.argv[1:])
+
+
+def evaluate_from_args(argl: list[str]) -> None:
+ # configure logging
+ logging.basicConfig(level=logging.INFO)
+ parser = argparse.ArgumentParser(description="Fast evaluation of WeatherGenerator runs.")
+ parser.add_argument(
+ "--config",
+ type=str,
+ default=None,
+ help="Path to the configuration yaml file for plotting, e.g. config/plotting_config.yaml",
+ )
+ parser.add_argument(
+ "--push-metrics",
+ required=False,
+ action="store_true",
+ help="(optional) Upload scores to MLFlow.",
+ )
+
+ args = parser.parse_args(argl)
+ if args.config:
+ config = Path(args.config)
+ else:
+ _logger.info(
+ "No config file provided, using the default template config (please edit accordingly)"
+ )
+ config = Path(_REPO_ROOT / "config" / "evaluate" / "eval_config.yml")
+ mlflow_client: MlflowClient | None = None
+ if args.push_metrics:
+ hpc_conf = _platform_env.get_hpc_config()
+ assert hpc_conf is not None
+ private_home = Path(hpc_conf)
+ private_cf = OmegaConf.load(private_home)
+ mlflow_client = setup_mlflow(private_cf)
+ _logger.info(f"MLFlow client set up: {mlflow_client}")
+
+ evaluate_from_config(OmegaConf.load(config), mlflow_client)
+
+
+def evaluate_from_config(cfg, mlflow_client: MlflowClient | None) -> None:
+ # load configuration
+
+ runs = cfg.run_ids
+
+ _logger.info(f"Detected {len(runs)} runs")
+
+ # Directory to store the summary plots
+ private_paths = cfg.get("private_paths", None)
+ summary_dir = Path(
+ cfg.evaluation.get("summary_dir", _DEFAULT_PLOT_DIR)
+ ) # base directory where summary plots will be stored
+
+ metrics = cfg.evaluation.metrics
+ regions = cfg.evaluation.get("regions", ["global"])
+ plot_score_maps = cfg.evaluation.get("plot_score_maps", False)
+
+ global_plotting_opts = cfg.get("global_plotting_options", {})
+
+ # to get a structure like: scores_dict[metric][region][stream][run_id] = plot
+ scores_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
+
+ for run_id, run in runs.items():
+ _logger.info(f"RUN {run_id}: Getting data...")
+
+ run_type = run.get("type", "zarr")
+ if run_type == "zarr":
+ reader = WeatherGenReader(run, run_id, private_paths)
+ elif run_type == "csv":
+ reader = CsvReader(run, run_id, private_paths)
+ else:
+ raise ValueError(f"Unknown run type {run_type} for run {run_id}. Supported: zarr, csv.")
+
+ for stream in reader.streams:
+ _logger.info(f"RUN {run_id}: Processing stream {stream}...")
+
+ stream_dict = reader.get_stream(stream)
+ if not stream_dict:
+ _logger.info(
+ f"Stream {stream} does not exist in source data or config file is empty. "
+ "Skipping."
+ )
+ continue
+
+ if stream_dict.get("plotting"):
+ _logger.info(f"RUN {run_id}: Plotting stream {stream}...")
+ _ = plot_data(reader, stream, global_plotting_opts)
+
+ if stream_dict.get("evaluation"):
+ _logger.info(f"Retrieve or compute scores for {run_id} - {stream}...")
+
+ for region in regions:
+ metrics_to_compute = []
+
+ for metric in metrics:
+ metric_data = reader.load_scores(
+ stream,
+ region,
+ metric,
+ )
+
+ if metric_data is None or plot_score_maps:
+ metrics_to_compute.append(metric)
+ continue
+
+ available_data = reader.check_availability(
+ stream, metric_data, mode="evaluation"
+ )
+
+ if not available_data.score_availability:
+ metrics_to_compute.append(metric)
+ else:
+ # simply select the chosen eval channels, samples, fsteps here...
+ scores_dict[metric][region][stream][run_id] = metric_data.sel( + sample=available_data.samples, + channel=available_data.channels, + forecast_step=available_data.fsteps, + ) + + if metrics_to_compute: + all_metrics, points_per_sample = calc_scores_per_stream( + reader, stream, region, metrics_to_compute, plot_score_maps + ) + + metric_list_to_json( + reader, + [all_metrics], + [points_per_sample], + [stream], + region, + ) + + for metric in metrics_to_compute: + scores_dict[metric][region][stream][run_id] = all_metrics.sel( + {"metric": metric} + ) + + if mlflow_client: + # Reorder scores_dict to push to MLFlow per run_id: + # Create a new defaultdict with the target structure: [run_id][metric][region][stream] + reordered_dict: dict[str, dict[str, dict[str, dict[str, DataArray]]]] = defaultdict( + lambda: defaultdict(lambda: defaultdict(dict)) + ) + + # Iterate through the original dictionary to get all keys and the final value + for metric, regions_dict in scores_dict.items(): + for region, streams_dict in regions_dict.items(): + for stream, runs_dict in streams_dict.items(): + for run_id, final_dict in runs_dict.items(): + # Assign the final_dict to the new structure using the reordered keys + reordered_dict[run_id][metric][region][stream] = final_dict + + channels_set = collect_channels(scores_dict, metric, region, runs) + + for run_id, run in runs.items(): + reader = WeatherGenReader(run, run_id, private_paths) + from_run_id = reader.inference_cfg["from_run_id"] + parent_run = get_or_create_mlflow_parent_run(mlflow_client, from_run_id) + _logger.info(f"MLFlow parent run: {parent_run}") + phase = "eval" + with mlflow.start_run(run_id=parent_run.info.run_id): + with mlflow.start_run( + run_name=f"{phase}_{from_run_id}_{run_id}", + parent_run_id=parent_run.info.run_id, + nested=True, + ) as run: + mlflow.set_tags(MlFlowUpload.run_tags(run_id, phase, from_run_id)) + log_scores( + reordered_dict[run_id], + mlflow_client, + run.info.run_id, + channels_set, + ) + + # plot summary + if scores_dict and cfg.evaluation.get("summary_plots", True): + _logger.info("Started creating summary plots..") + plot_summary(cfg, scores_dict, summary_dir) + + +if __name__ == "__main__": + evaluate() diff --git a/packages/evaluate/src/weathergen/evaluate/score.py b/packages/evaluate/src/weathergen/evaluate/score.py new file mode 100755 index 000000000..88d48a098 --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/score.py @@ -0,0 +1,1498 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. +import inspect +import logging +from dataclasses import dataclass + +import dask.array as da +import numpy as np +import pandas as pd +import xarray as xr +from scipy.spatial import cKDTree + +from weathergen.evaluate.score_utils import to_list + +# from common.io import MockIO + +_logger = logging.getLogger(__name__) + +try: + import xskillscore + from xhistogram.xarray import histogram +except Exception: + _logger.warning( + "Could not import xskillscore and xhistogram. Thus, CRPS and " + "rank histogram-calculations are not supported." 
+ ) + + +# helper function to calculate skill score + + +def _get_skill_score( + score_fcst: xr.DataArray, score_ref: xr.DataArray, score_perf: float +) -> xr.DataArray: + """ + Calculate the skill score of a forecast data array w.r.t. a reference and a perfect score. + Definition follows Wilks, Statistical Methods in the Atmospheric Sciences (2006), + Chapter 7.1.4, Equation 7.4 + + Parameters + ---------- + score_fcst : xr.DataArray + Forecast score data array + score_ref : xr.DataArray + Score data array of a reference forecast, e.g. a climatological mean + score_perf : float + Score data array of a perfect forecast, e.g. 0 for the RMSE-score + + Returns + ---------- + skill_score : xr.DataArray + Skill score data array + """ + + skill_score = (score_fcst - score_ref) / (score_perf - score_ref) + + return skill_score + + +@dataclass(frozen=True) +class VerifiedData: + """ + # Used to ensure that the prediction and ground truth data are compatible, + # i.e. dimensions, broadcastability. + # This is meant to ensure that the data can be used for score calculations. + """ + + prediction: xr.DataArray + ground_truth: xr.DataArray + prediction_next: xr.DataArray | None + ground_truth_next: xr.DataArray | None + climatology: xr.DataArray | None + + def __post_init__(self): + # Perform checks on initialization + self._validate_dimensions() + self._validate_broadcastability() + + # TODO: add checks for prediction_next, ground_truth_next, climatology + def _validate_dimensions(self): + # Ensure all dimensions in truth are in forecast (or equal) + missing_dims = set(self.ground_truth.dims) - set(self.prediction.dims) + if missing_dims: + raise ValueError( + f"Truth data has extra dimensions not found in forecast: {missing_dims}" + ) + + # TODO: add checks for prediction_next, ground_truth_next, climatology + def _validate_broadcastability(self): + try: + # Attempt broadcast + xr.broadcast(self.prediction, self.ground_truth) + except ValueError as e: + raise ValueError(f"Forecast and truth are not broadcastable: {e}") from e + + +def get_score( + data: VerifiedData, + score_name: str, + agg_dims: str | list[str] = "all", + group_by_coord: str | None = None, + ens_dim: str = "ens", + compute: bool = False, + **kwargs, +) -> xr.DataArray: + """ + Get the score for the given data and score name. + Note that the scores are aggregated over all dimensions of the prediction data by default. + + Parameters + ---------- + data : VerifiedData + VerifiedData object containing prediction and ground truth data. + score_name : str + Name of the score to calculate. + agg_dims : str | List[str] + List of dimension names over which the score will be aggregated (most often averaged). + If set to 'all', aggregation will be performed over all dimensions of the forecast data. + ens_dim : str + Name of the ensemble dimension in the forecast data. Only used for probabilistic scores. + compute : bool + If True, the score will be computed immediately. If False, the score will be returned + as a lazy xarray DataArray, which allows for efficient graph construction and execution + kwargs : dict + Additional keyword arguments to pass to the score function. + + Returns + ------- + xr.DataArray + Calculated score as an xarray DataArray. 
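+
+    Examples
+    --------
+    Minimal sketch with toy, numpy-backed inputs (dimension names are assumed):
+
+    >>> pred = xr.DataArray(np.random.rand(4, 10), dims=("forecast_step", "ipoint"))
+    >>> truth = xr.DataArray(np.random.rand(4, 10), dims=("forecast_step", "ipoint"))
+    >>> data = VerifiedData(pred, truth, None, None, None)
+    >>> rmse = get_score(data, "rmse", agg_dims="ipoint", compute=True)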
+ """
+ sc = Scores(agg_dims=agg_dims, ens_dim=ens_dim)
+
+ score_data = sc.get_score(data, score_name, group_by_coord, **kwargs)
+ if compute:
+ # If compute is True, compute the score immediately
+ return score_data.compute()
+
+ return score_data
+
+
+# scores class
+class Scores:
+ """
+ Class to calculate scores and skill scores.
+ """
+
+ def __init__(
+ self,
+ agg_dims: str | list[str] = "all",
+ ens_dim: str = "ens",
+ ):
+ """
+ Parameters
+ ----------
+ agg_dims : str | List[str]
+ List of dimension names over which the score will be aggregated (most often averaged).
+ If set to 'all', aggregation will be performed over all dimensions of the forecast data.
+ ens_dim: str
+ Name of the ensemble dimension in the forecast data. Only used for probabilistic scores.
+ """
+ self._agg_dims_in = self._validate_agg_dims(agg_dims)
+ self._ens_dim = self._validate_ens_dim(ens_dim)
+
+ self.det_metrics_dict = {
+ "ets": self.calc_ets,
+ "pss": self.calc_pss,
+ "fbi": self.calc_fbi,
+ "mae": self.calc_mae,
+ "l1": self.calc_l1,
+ "l2": self.calc_l2,
+ "mse": self.calc_mse,
+ "rmse": self.calc_rmse,
+ "vrmse": self.calc_vrmse,
+ "bias": self.calc_bias,
+ "acc": self.calc_acc,
+ "froct": self.calc_froct,
+ "troct": self.calc_troct,
+ "fact": self.calc_fact,
+ "tact": self.calc_tact,
+ "grad_amplitude": self.calc_spatial_variability,
+ "psnr": self.calc_psnr,
+ "seeps": self.calc_seeps,
+ }
+ self.prob_metrics_dict = {
+ "ssr": self.calc_ssr,
+ "crps": self.calc_crps,
+ "rank_histogram": self.calc_rank_histogram,
+ "spread": self.calc_spread,
+ }
+
+ def get_score(
+ self,
+ data: VerifiedData,
+ score_name: str,
+ group_by_coord: str | None = None,
+ compute: bool = False,
+ **kwargs,
+ ):
+ """
+ Calculate the score for the given data and score name.
+
+ If data is a dask array, the score will be calculated lazily.
+ This allows for efficient graph construction and execution when calculating several scores.
+ Example usage:
+ >>> # Initialize Scores object with aggregation dimensions
+ >>> sc = Scores(agg_dims=["ipoint"])
+ >>> # Collect lazy scores for a given VerifiedData object
+ >>> score_names = ["ets", "pss", "fbi"]
+ >>> score_list = [sc.get_score(data, score_name) for score_name in score_names]
+ >>> combined_metrics = xr.concat(score_list, dim="score_name")
+ >>> combined_metrics["score_name"] = score_names
+ >>> # Do the computation with a joint graph
+ >>> combined_metrics = combined_metrics.compute()
+
+ Parameters
+ ----------
+ data : VerifiedData
+ VerifiedData object containing prediction and ground truth data.
+ score_name : str
+ Name of the score to calculate.
+ group_by_coord : str | None
+ Name of a coordinate to group by; if given and valid, the score is computed
+ per group and the results are concatenated along this coordinate.
+ compute : bool
+ If True, the score will be computed immediately. If False, the score will be returned
+ as a lazy xarray DataArray, which allows for efficient graph construction and execution.
+ kwargs : dict
+ Additional keyword arguments to pass to the score function.
+
+ Returns
+ -------
+ xr.DataArray
+ Calculated score as an xarray DataArray.
+
+ """
+ if score_name in self.det_metrics_dict.keys():
+ f = self.det_metrics_dict[score_name]
+ elif score_name in self.prob_metrics_dict.keys():
+ if self._ens_dim not in data.prediction.dims:
+ _logger.warning(
+ f"Probabilistic score {score_name} chosen, but ensemble dimension "
+ f"{self._ens_dim} not found in prediction data. Skipping score calculation."
+ )
+ return None
+ f = self.prob_metrics_dict[score_name]
+ else:
+ raise ValueError(
+ f"Unknown score chosen. 
Supported scores: { + ', '.join(self.det_metrics_dict.keys()) + + ', ' + + ', '.join(self.prob_metrics_dict.keys()) + }" + ) + + if self._agg_dims_in == "all": + # Aggregate over all dimensions of the prediction data + self._agg_dims = list(data.prediction.dims) + else: + # Check if _agg_dims is in prediction data + for dim in self._agg_dims_in: + if dim not in data.prediction.dims: + raise ValueError( + f"Average dimension '{dim}' not found in prediction data " + f"dimensions: {data.prediction.dims}" + ) + self._agg_dims = self._agg_dims_in + + arg_names: list[str] = inspect.getfullargspec(f).args[1:] + + score_args_map = { + "froct": ["p", "gt", "p_next", "gt_next"], + "troct": ["p", "gt", "p_next", "gt_next"], + "acc": ["p", "gt", "c"], + "fact": ["p", "c"], + "tact": ["gt", "c"], + } + + available = { + "p": data.prediction, + "gt": data.ground_truth, + "p_next": data.prediction_next, + "gt_next": data.ground_truth_next, + "c": data.climatology, + } + + # assign p and gt by default if metrics do not have specific args + keys = score_args_map.get(score_name, ["p", "gt"]) + args = {k: available[k] for k in keys} + + for an in arg_names: + if an in kwargs: + args[an] = kwargs[an] + + if group_by_coord is not None and self._validate_groupby_coord(data, group_by_coord): + # Apply groupby to all DataArrays in args + grouped_args = { + k: (v.groupby(group_by_coord) if isinstance(v, xr.DataArray) else v) + for k, v in args.items() + } + + # Apply function f to each group and concatenate results + group_names = list(next(iter(grouped_args.values())).groups.keys()) + results = [] + for name in group_names: + group_slice = { + k: (v[name] if v is not None else v) for k, v in grouped_args.items() + } + res = f(**group_slice) + # Add coordinate for concatenation + res = res.expand_dims({group_by_coord: [name]}) + results.append(res) + result = xr.concat(results, dim=group_by_coord) + else: + # No grouping: just call the function + result = f(**args) + + if compute: + return result.compute() + else: + return result + + def _validate_agg_dims(self, dims: str | list[str]) -> list[str] | str: + if dims == "all": + return dims + if isinstance(dims, str): + return [dims] + if isinstance(dims, list) and all(isinstance(d, str) for d in dims): + return dims + raise ValueError("agg_dims must be 'all', a string, or list of strings.") + + def _validate_ens_dim(self, dim: str) -> str: + if not isinstance(dim, str): + raise ValueError("ens_dim must be a string.") + return dim + + def _validate_groupby_coord(self, data: VerifiedData, group_by_coord: str | None) -> bool: + """ + Check if the group_by_coord is present in both prediction and ground truth data + and compatible. Raises ValueError if conditions are not met. + If group_by_coord does not have more than one unique value in the prediction data, + a warning is logged and the function returns False, indicating that grouping is + not applicable. + + Parameters + ---------- + data : VerifiedData + VerifiedData object containing prediction and ground truth data. + group_by_coord : str + Name of the coordinate to group by. + + Returns + ------- + group_by_coord : bool + True if the group_by_coord is valid for grouping, False otherwise. + """ + p, gt = data.prediction, data.ground_truth + if group_by_coord not in p.coords or group_by_coord not in gt.coords: + raise ValueError( + f"Coordinate '{group_by_coord}' must be present in both prediction " + "and ground truth data." 
+ )
+
+ # Check if the dims associated with the groupby_coord are compatible
+ dims_p = set(p.coords[group_by_coord].dims)
+ dims_gt = set(gt.coords[group_by_coord].dims)
+ if dims_p != dims_gt:
+ raise ValueError(
+ f"Coordinate '{group_by_coord}' is associated with different dimensions: "
+ f"{dims_p} in prediction, {dims_gt} in ground truth."
+ )
+
+ if len(np.atleast_1d(p.coords[group_by_coord].values)) > 1:
+ return True
+ else:
+ _logger.warning(
+ f"Coordinate '{group_by_coord}' has only one unique value in prediction data. "
+ "It will not be used for grouping."
+ )
+ return False
+
+ def _sum(self, data: xr.DataArray) -> xr.DataArray:
+ """
+ Sum data over aggregation dimensions.
+
+ Parameters
+ ----------
+ data : xr.DataArray
+ xarray DataArray to sum over aggregation dimensions
+
+ Returns
+ -------
+ xr.DataArray
+ Summed data
+ """
+ return data.sum(dim=self._agg_dims)
+
+ def _mean(self, data: xr.DataArray) -> xr.DataArray:
+ """
+ Average data over aggregation dimensions.
+
+ Parameters
+ ----------
+ data : xr.DataArray
+ xarray DataArray to average over aggregation dimensions
+
+ Returns
+ -------
+ xr.DataArray
+ Averaged data
+ """
+ return data.mean(dim=self._agg_dims)
+
+ def get_2x2_event_counts(
+ self,
+ p: xr.DataArray,
+ gt: xr.DataArray,
+ thresh: float,
+ ) -> tuple[xr.DataArray, xr.DataArray, xr.DataArray, xr.DataArray]:
+ """
+ Get counts of 2x2 contingency tables
+
+ Parameters
+ ----------
+ p: xr.DataArray
+ Forecast data array
+ gt: xr.DataArray
+ Ground truth data array
+ thresh: float
+ Threshold to define event occurrence
+ Returns
+ -------
+ tuple[xr.DataArray, xr.DataArray, xr.DataArray, xr.DataArray]
+ Counts of hits (a), false alarms (b), misses (c), and correct negatives (d)
+ """
+
+ a = self._sum((p >= thresh) & (gt >= thresh)) # hits
+ b = self._sum((p >= thresh) & (gt < thresh)) # false alarms
+ c = self._sum((p < thresh) & (gt >= thresh)) # misses
+ d = self._sum((p < thresh) & (gt < thresh)) # correct negatives
+
+ return a, b, c, d
+
+ ### Deterministic scores
+
+ def calc_ets(
+ self,
+ p: xr.DataArray,
+ gt: xr.DataArray,
+ thresh: float = 0.1,
+ ) -> xr.DataArray:
+ """
+ Calculate the equitable threat score (ETS) of forecast data w.r.t. reference data.
+ Parameters
+ ----------
+ p: xr.DataArray
+ Forecast data array
+ gt: xr.DataArray
+ Ground truth data array
+ thresh: float
+ Threshold to define event occurrence
+ Returns
+ -------
+ xr.DataArray
+ Equitable threat score (ETS)
+ """
+ a, b, c, d = self.get_2x2_event_counts(p, gt, thresh)
+ n = a + b + c + d
+ ar = (a + b) * (a + c) / n # random reference forecast
+
+ denom = a + b + c - ar
+
+ ets = (a - ar) / denom
+ ets = ets.where(denom > 0, np.nan)
+
+ return ets
+
+ def calc_fbi(
+ self,
+ p: xr.DataArray,
+ gt: xr.DataArray,
+ thresh: float = 0.1,
+ ) -> xr.DataArray:
+ """
+ Calculate the frequency bias index (FBI) of forecast data w.r.t. reference data.
+ Parameters
+ ----------
+ p: xr.DataArray
+ Forecast data array
+ gt: xr.DataArray
+ Ground truth data array
+ thresh: float
+ Threshold to define event occurrence
+ Returns
+ -------
+ xr.DataArray
+ Frequency bias index (FBI)
+ """
+
+ a, b, c, _ = self.get_2x2_event_counts(p, gt, thresh)
+
+ denom = a + c
+ fbi = (a + b) / denom
+
+ fbi = fbi.where(denom > 0, np.nan)
+
+ return fbi
+
+ def calc_pss(
+ self,
+ p: xr.DataArray,
+ gt: xr.DataArray,
+ thresh: float = 0.1,
+ ) -> xr.DataArray:
+ """
+ Calculate the Peirce skill score (PSS) of forecast data w.r.t. reference data.
+ Parameters + ---------- + p: xr.DataArray + Forecast data array + gt: xr.DataArray + Ground truth data array + thresh: float + Threshold to define event occurrence + Returns + ------- + xr.DataArray + Pierce skill score (PSS) + """ + + a, b, c, d = self.get_2x2_event_counts(p, gt, thresh) + + denom = (a + c) * (b + d) + pss = (a * d - b * c) / denom + + pss = pss.where(denom > 0, np.nan) + + return pss + + def calc_l1( + self, + p: xr.DataArray, + gt: xr.DataArray, + scale_dims: list | None = None, + ) -> xr.DataArray: + """ + Calculate the L1 error norm of forecast data w.r.t. reference data. + Note that the L1 error norm is calculated as the sum of absolute differences. + + Parameters + ---------- + p: xr.DataArray + Forecast data array + gt: xr.DataArray + Ground truth data array + scale_dims: list | None + List of dimensions over which the L1 score will be scaled. + If provided, the L1 score will be divided by the product of the sizes of these + dimensions. + + Returns + ------- + xr.DataArray + L1 error norm + """ + l1 = np.abs(p - gt) + + l1 = self._sum(l1) + + if scale_dims: + scale_dims = to_list(scale_dims) + + assert all([dim in p.dims for dim in scale_dims]), ( + f"Provided scale dimensions {scale_dims} are not all present in the prediction " + f"data dimensions {p.dims}." + ) + + len_dims = np.array([p.sizes[dim] for dim in scale_dims]) + l1 /= np.prod(len_dims) + + return l1 + + def calc_l2( + self, + p: xr.DataArray, + gt: xr.DataArray, + scale_dims: list | None = None, + squared_l2: bool = False, + ) -> xr.DataArray: + """ + Calculate the L2 error norm of forecast data w.r.t. reference data. + + Parameters + ---------- + p: xr.DataArray + Forecast data array + gt: xr.DataArray + Ground truth data array + scale_dims: list | None + List of dimensions over which the L2 score will be scaled. + If provided, the L2 score will be divided by the product of the sizes of these + dimensions. + squared_l2: bool + If True, the L2 score will be returned as the sum of squared differences. + If False, the L2 score will be returned as the square root of the sum of squared + differences. Default is False, i.e. the L2 score is returned as the square root of the + sum of squared differences. + + Returns + ------- + xr.DataArray + L2 error norm + """ + l2 = np.square(p - gt) + + l2 = self._sum(l2) + + if not squared_l2: + l2 = np.sqrt(l2) + + if scale_dims: + scale_dims = to_list(scale_dims) + + assert all([dim in p.dims for dim in scale_dims]), ( + f"Provided scale dimensions {scale_dims} are not all present in the prediction " + f"data dimensions {p.dims}." + ) + + len_dims = np.array([p.sizes[dim] for dim in scale_dims]) + l2 /= np.prod(len_dims) + + return l2 + + def calc_mae(self, p: xr.DataArray, gt: xr.DataArray) -> xr.DataArray: + """ + Calculate mean absolute error (MAE) of forecast data w.r.t. reference data. + + Parameters + ---------- + p: xr.DataArray + Forecast data array + gt: xr.DataArray + Ground truth data array + """ + if self._agg_dims is None: + raise ValueError( + "Cannot calculate mean absolute error without aggregation dimensions " + "(agg_dims=None)." + ) + + return self._mean(np.abs(p - gt)) + + def calc_mse(self, p: xr.DataArray, gt: xr.DataArray) -> xr.DataArray: + """ + Calculate mean squared error (MSE) of forecast data w.r.t. reference data. 
+
+ Parameters
+ ----------
+ p: xr.DataArray
+ Forecast data array
+ gt: xr.DataArray
+ Ground truth data array
+
+ Returns
+ -------
+ xr.DataArray
+ Mean squared error (MSE)
+ """
+ if self._agg_dims is None:
+ raise ValueError(
+ "Cannot calculate mean squared error without aggregation dimensions "
+ "(agg_dims=None)."
+ )
+
+ return self._mean(np.square(p - gt))
+
+ def calc_rmse(self, p: xr.DataArray, gt: xr.DataArray) -> xr.DataArray:
+ """
+ Calculate root mean squared error (RMSE) of forecast data w.r.t. reference data
+
+ Parameters
+ ----------
+ p: xr.DataArray
+ Forecast data array
+ gt: xr.DataArray
+ Ground truth data array
+
+ Returns
+ -------
+ xr.DataArray
+ Root mean squared error (RMSE)
+ """
+ if self._agg_dims is None:
+ raise ValueError(
+ "Cannot calculate root mean squared error without aggregation dimensions "
+ "(agg_dims=None)."
+ )
+
+ rmse = np.sqrt(self.calc_mse(p, gt))
+
+ return rmse
+
+ def calc_vrmse(self, p: xr.DataArray, gt: xr.DataArray):
+ """
+ Calculate variance-normalized root mean squared error (VRMSE) of forecast data w.r.t.
+ reference data
+
+ Parameters
+ ----------
+ p: xr.DataArray
+ Forecast data array
+ gt: xr.DataArray
+ Ground truth data array
+ """
+ if self._agg_dims is None:
+ raise ValueError(
+ "Cannot calculate variance-normalized root mean squared error without aggregation "
+ "dimensions (agg_dims=None)."
+ )
+
+ vrmse = np.sqrt(self.calc_mse(p, gt) / (gt.var(dim=self._agg_dims) + 1e-6))
+
+ return vrmse
+
+ @staticmethod
+ def sort_by_coords(da_to_sort: xr.DataArray, da_reference: xr.DataArray) -> xr.DataArray:
+ """
+ Sorts one xarray.DataArray's coordinate ordering to match a reference array using KDTree.
+
+ This method finds the nearest neighbor in `da_to_sort` for every coordinate in
+ `da_reference`, effectively reordering `da_to_sort` along its indexed dimension to align
+ with the sequence of coordinates in the reference.
+
+ Parameters
+ ----------
+ da_to_sort : xr.DataArray
+ The DataArray whose coordinate ordering needs to be matched.
+ Must contain 'lat' and 'lon' coordinates and an indexed dimension (e.g., 'ipoint').
+ da_reference : xr.DataArray
+ The DataArray providing the target coordinate ordering (the template). Must contain
+ 'lat' and 'lon' coordinates.
+
+ Returns
+ -------
+ xr.DataArray
+ A new DataArray with the data from `da_to_sort` reordered to match the
+ coordinate sequence of `da_reference`. If any reference coordinate has no match
+ within the allowed distance tolerance (1e-5), an all-NaN copy of `da_reference`
+ is returned and a message is logged.
+
+ Notes
+ -----
+ The matching uses `scipy.spatial.cKDTree.query` with a strict distance threshold
+ (`distance_upper_bound=1e-5`) to ensure precise one-to-one alignment.
+ """
+
+ # Extract coordinates
+ ref_lats = da_reference.lat.values
+ ref_lons = da_reference.lon.values
+ sort_lats = da_to_sort.lat.values
+ sort_lons = da_to_sort.lon.values
+
+ # Build KDTree on coordinates to sort
+ sort_coords = np.column_stack((sort_lats, sort_lons))
+ tree = cKDTree(sort_coords)
+
+ # Find nearest neighbors for reference coordinates
+ ref_coords = np.column_stack((ref_lats, ref_lons))
+ dist, indices = tree.query(ref_coords, distance_upper_bound=1e-5)
+
+ # Check for unmatched coordinates (cKDTree reports them with dist = inf)
+ unmatched_mask = ~np.isfinite(dist)
+ if np.any(unmatched_mask):
+ n_unmatched = np.sum(unmatched_mask)
+ _logger.info(
+ f"Found {n_unmatched} reference coordinates with no matching coordinates in "
+ "array to sort. Returning NaN DataArray."
+ ) + return xr.full_like(da_reference, np.nan) + + # Reorder da_to_sort to match reference ordering + return da_to_sort.isel(ipoint=indices) + + def calc_change_rate( + self, + s0: xr.DataArray, + s1: xr.DataArray, + ) -> xr.DataArray: + """ + Calculate the "change rate" of a data array as the mean absolute difference between two + consecutive time steps. + + Parameters + ---------- + s0: xr.DataArray + Data array at time step t0 + s1: xr.DataArray + Data array at time step t1 + + Returns + ------- + xr.DataArray + Change rate of the data array + """ + + if s1 is None: + return xr.full_like(s0, np.nan) + else: + # Sort the coordinates of subsequent time steps to match each other. Can be removed + # once unshuffling is solved elsewhere + s1 = self.sort_by_coords(da_to_sort=s1, da_reference=s0) + crate = np.abs(s0 - s1.values) + return crate + + def calc_froct( + self, + p: xr.DataArray, + gt: xr.DataArray, + p_next: xr.DataArray, + gt_next: xr.DataArray, + ) -> xr.DataArray: + """ + Calculate forecast rate of change over time + + Parameters + ---------- + p: xr.DataArray + Forecast data array + gt: xr.DataArray + Ground truth data array (not used in calculation, but kept for consistency) + p_next: xr.DataArray + Next forecast step data array + gt_next: xr.DataArray + Next ground truth step data array (not used in calculation, but kept for consistency) + Returns + ------- + xr.DataArray + Forecast rate of change over time + """ + if self._agg_dims is None: + raise ValueError( + "Cannot calculate rate of change without aggregation dimensions (agg_dims=None)." + ) + + froct = self.calc_change_rate(p, p_next) + + froct = self._mean(froct) + + return froct + + def calc_troct( + self, + p: xr.DataArray, + gt: xr.DataArray, + gt_next: xr.DataArray, + p_next: xr.DataArray, + ): + """ + Calculate target rate of change over time + + Parameters + ---------- + p: xr.DataArray + Forecast data array (not used in calculation, but kept for consistency) + gt: xr.DataArray + Ground truth data array + p_next: xr.DataArray + Next forecast step data array (not used in calculation, but kept for consistency) + gt_next: xr.DataArray + Next ground truth step data array + Returns + ------- + xr.DataArray + Target rate of change over time + """ + if self._agg_dims is None: + raise ValueError( + "Cannot calculate rate of change without aggregation dimensions (agg_dims=None)." + ) + + troct = self.calc_change_rate(gt, gt_next) + troct = self._mean(troct) + + return troct + + def _calc_act( + self, + x: xr.DataArray, + c: xr.DataArray, + ): + """ + Calculate activity metric as standard deviation of forecast or target anomaly. + + NOTE: + The climatlogical mean data clim_mean must fit to the forecast and ground truth data. + + Parameters + ---------- + x: xr.DataArray + Forecast or target data array + c: xr.DataArray + Climatological mean data array, which is used to calculate anomalies + """ + + if c is None: + return xr.full_like(x.sum(self._agg_dims), np.nan) + + # Calculate anomalies + ano = x - c + act = ano.std(dim=self._agg_dims) + + return act + + def calc_fact( + self, + p: xr.DataArray, + c: xr.DataArray, + ): + """ + Calculate forecast activity metric as standard deviation of forecast anomaly. + + NOTE: + The climatlogical mean data clim_mean must fit to the forecast data. 
+ + Parameters + ---------- + p: xr.DataArray + Forecast data array + c: xr.DataArray + Climatological mean data array, which is used to calculate anomalies + """ + + return self._calc_act(p, c) + + def calc_tact( + self, + gt: xr.DataArray, + c: xr.DataArray, + ): + """ + Calculate target activity metric as standard deviation of target anomaly. + + NOTE: + The climatlogical mean data clim_mean must fit to the target data. + + Parameters + ---------- + gt: xr.DataArray + Target data array + c: xr.DataArray + Climatological mean data array, which is used to calculate anomalies + """ + + return self._calc_act(gt, c) + + def calc_acc( + self, + p: xr.DataArray, + gt: xr.DataArray, + c: xr.DataArray, + ) -> xr.DataArray: + """ + Calculate anomaly correlation coefficient (ACC). + + NOTE: + The climatlogical mean data clim_mean must fit to the forecast and ground truth data. + By definition, the ACC is always aggregated over the spatial dimensions. + + Parameters + ---------- + p: xr.DataArray + Forecast data array + gt: xr.DataArray + Ground truth data array + c: xr.DataArray + Climatological mean data array, which is used to calculate anomalies + + Returns + ------- + xr.DataArray + Anomaly correlation coefficient (ACC) + """ + + if c is None: + return xr.full_like(p.sum(self._agg_dims), np.nan) + + # Calculate anomalies + fcst_ano, obs_ano = p - c, gt - c + + # Calculate ACC over spatial dimensions (no grouping) + acc = (fcst_ano * obs_ano).sum(self._agg_dims) / np.sqrt( + (fcst_ano**2).sum(self._agg_dims) * (obs_ano**2).sum(self._agg_dims) + ) + + return acc + + def calc_bias(self, p: xr.DataArray, gt: xr.DataArray) -> xr.DataArray: + """ + Calculate mean bias of forecast data w.r.t. reference data + + Parameters + ---------- + p: xr.DataArray + Forecast data array + gt: xr.DataArray + Ground truth data array + Returns + ------- + xr.DataArray + Mean bias + """ + bias = self._mean(p - gt) + + return bias + + def calc_psnr( + self, + p: xr.DataArray, + gt: xr.DataArray, + pixel_max: float = 1.0, + ) -> xr.DataArray: + """ + Calculate PSNR of forecast data w.r.t. reference data + + Parameters + ---------- + p: xr.DataArray + Forecast data array + gt: xr.DataArray + Ground truth data array + pixel_max: float + Maximum pixel value in the data. Default is 1.0. + Returns + ------- + xr.DataArray + Peak signal-to-noise ratio (PSNR) + """ + + mse = self.calc_mse(p, gt) + if np.count_nonzero(mse) == 0: + psnr = mse + psnr[...] = 100.0 + else: + psnr = 20.0 * np.log10(pixel_max / np.sqrt(mse)) + + return psnr + + def calc_spatial_variability( + self, + p: xr.DataArray, + gt: xr.DataArray, + order: int = 1, + non_spatial_avg_dims: list[str] = None, + ) -> xr.DataArray: + """ + Calculates the ratio between the spatial variability of differental operator + with order 1 (higher values unsupported yet) forecast and ground truth data using + the calc_geo_spatial-method. + + NOTE: + Requires that data is provided on a regular lat/lon-grid! + + Parameters + ---------- + p: xr.DataArray + Forecast data array + gt: xr.DataArray + Ground truth data array + order: int + Order of the spatial differential operator to be applied. Supported orders: 1 + non_spatial_avg_dims: List[str] + List of dimensions over which the spatial variability ratio should be averaged. + It must be non-spatial dimensions, i.e. not latitude or longitude. 
+ Returns
+ -------
+ xr.DataArray
+ Ratio of spatial variability between forecast and ground truth data
+ """
+
+ fcst_grad = self.calc_geo_spatial_diff(p, order=order)
+ ref_grd = self.calc_geo_spatial_diff(gt, order=order)
+
+ ratio_spat_variability = fcst_grad / ref_grd
+
+ if non_spatial_avg_dims is not None:
+ ratio_spat_variability = ratio_spat_variability.mean(dim=non_spatial_avg_dims)
+
+ return ratio_spat_variability
+
+ def calc_seeps(
+ self,
+ p: xr.DataArray,
+ gt: xr.DataArray,
+ seeps_weights: xr.DataArray,
+ t1: xr.DataArray,
+ t3: xr.DataArray,
+ spatial_dims: list,
+ ) -> xr.DataArray:
+ """
+ Calculates stable equitable error in probability space (SEEPS), see Rodwell et al., 2011
+
+ NOTE:
+ Threshold arrays t1 and t3 (derived from space-time dependent climatology)
+ must fit to the forecast and ground truth data.
+
+ Parameters
+ ----------
+ p: xr.DataArray
+ Forecast data array
+ gt: xr.DataArray
+ Ground truth data array
+ seeps_weights: xr.DataArray
+ SEEPS-parameter matrix to weight contingency table elements
+ t1: xr.DataArray
+ Threshold for light precipitation events
+ t3: xr.DataArray
+ Threshold for strong precipitation events
+ spatial_dims: List[str]
+ List of spatial dimensions of the data, e.g. ["lat", "lon"]
+ Returns
+ -------
+ xr.DataArray
+ SEEPS skill score (i.e. 1-SEEPS)
+ """
+
+ def seeps(ground_truth, prediction, thr_light, thr_heavy, seeps_weights):
+ ob_ind = (ground_truth > thr_light).astype(int) + (ground_truth >= thr_heavy).astype(
+ int
+ )
+ fc_ind = (prediction > thr_light).astype(int) + (prediction >= thr_heavy).astype(int)
+ indices = fc_ind * 3 + ob_ind # index of each data point in their local 3x3 matrices
+ seeps_val = seeps_weights[
+ indices, np.arange(len(indices))
+ ] # pick the right weight for each data point
+
+ return 1.0 - seeps_val
+
+ if p.ndim == 3:
+ assert len(spatial_dims) == 2, (
+ "Provide two spatial dimensions for three-dimensional data."
+ )
+ prediction, ground_truth = (
+ p.stack({"xy": spatial_dims}),
+ gt.stack({"xy": spatial_dims}),
+ )
+ seeps_weights = seeps_weights.stack({"xy": spatial_dims})
+ t3 = t3.stack({"xy": spatial_dims})
+ lstack = True
+ elif p.ndim == 2:
+ prediction, ground_truth = p, gt
+ lstack = False
+ else:
+ raise ValueError("Data must be a two-or-three-dimensional array.")
+
+ # check dimensioning of data
+ assert prediction.ndim <= 2, (
+ f"Data must be one- or two-dimensional, but has {prediction.ndim} dimensions. "
+ "Check if stacking with spatial_dims may help."
+ )
+
+ if prediction.ndim == 1:
+ seeps_values_all = seeps(ground_truth, prediction, t1.values, t3, seeps_weights)
+ else:
+ prediction, ground_truth = (
+ prediction.transpose(..., "xy"),
+ ground_truth.transpose(..., "xy"),
+ )
+ seeps_values_all = xr.full_like(prediction, np.nan)
+ seeps_values_all.name = "seeps"
+ for it in range(ground_truth.shape[0]):
+ prediction_now, ground_truth_now = (
+ prediction[it, ...],
+ ground_truth[it, ...],
+ )
+ # in case of missing data, skip computation
+ if np.all(np.isnan(prediction_now)) or np.all(np.isnan(ground_truth_now)):
+ continue
+
+ seeps_values_all[it, ...] 
= seeps( + ground_truth_now, + prediction_now, + t1.values, + t3, + seeps_weights.values, + ) + + if lstack: + seeps_values_all = seeps_values_all.unstack() + + if self._agg_dims is not None: + seeps_values = self._mean(seeps_values_all) + else: + seeps_values = seeps_values_all + + return seeps_values + + ### Probablistic scores + + def calc_spread(self, p: xr.DataArray, **kwargs) -> xr.DataArray: + """ + Calculate the spread of the forecast ensemble + Parameters + ---------- + p: xr.DataArray + Forecast data array with ensemble dimension + + Returns + ------- + xr.DataArray + Spread of the forecast ensemble + """ + ens_std = p.std(dim=self._ens_dim) + + return self._mean(np.sqrt(ens_std**2)) + + def calc_ssr(self, p: xr.DataArray, gt: xr.DataArray) -> xr.DataArray: + """ + Calculate the Spread-Skill Ratio (SSR) of the forecast ensemble data w.r.t. reference data + + Parameters + ---------- + p: xr.DataArray + Forecast data array with ensemble dimension + gt: xr.DataArray + Ground truth data array + Returns + ------- + xr.DataArray + Spread-Skill Ratio (SSR) + """ + ssr = self.calc_spread(p) / self.calc_rmse(p, gt) # spread/rmse + + return ssr + + def calc_crps( + self, + p: xr.DataArray, + gt: xr.DataArray, + method: str = "ensemble", + **kwargs, + ) -> xr.DataArray: + """ + Wrapper around CRPS-methods provided by xskillscore-package. + See https://xskillscore.readthedocs.io/en/stable/api + + Parameters + ---------- + p: xr.DataArray + Forecast data array with ensemble dimension + gt: xr.DataArray + Ground truth data array + method: str + Method to calculate CRPS. Supported methods: ["ensemble", "gaussian"] + kwargs: dict + Other keyword parameters supported by respective CRPS-method from + the xskillscore package + + Returns + ------- + xr.DataArray + CRPS score data array averaged over the provided dimensions + """ + crps_methods = ["ensemble", "gaussian"] + + if method == "ensemble": + func_kwargs = { + "forecasts": p, + "member_dim": self._ens_dim, + "dim": self._agg_dims, + **kwargs, + } + crps_func = xskillscore.crps_ensemble + elif method == "gaussian": + func_kwargs = { + "mu": p.mean(dim=self._ens_dim), + "sig": p.std(dim=self._ens_dim), + "dim": self._agg_dims, + **kwargs, + } + crps_func = xskillscore.crps_gaussian + else: + raise ValueError( + f"Unsupported CRPS-calculation method {method} chosen." + + f"Supported methods: {', '.join(crps_methods)}" + ) + + crps = crps_func(gt, **func_kwargs) + + return crps + + def calc_rank_histogram( + self, + p: xr.DataArray, + gt: xr.DataArray, + norm: bool = True, + add_noise: bool = True, + noise_fac=1.0e-03, + ) -> xr.DataArray: + """ + Calculate the rank histogram of the forecast data w.r.t. reference data. + + Parameters + ---------- + p: xr.DataArray + Forecast data array with ensemble dimension + gt: xr.DataArray + Ground truth data array + norm: bool + Flag if normalized counts should be returned. If True, the rank histogram will be + normalized by the number of ensemble members in the forecast data. + add_noise: bool + Flag if a small amount of random noise should be added to the data to avoid ties in the + rank histogram. + This is recommended for fair computations, cf. Sec. 4.2.2 in Harris et al. 2022 + noise_fac: float + Magnitude of random noise to be added to the data if add_noise is True. + Default is 1.0e-03. 
This value is only relevant if add_noise is True.
+
+ Returns
+ -------
+ xr.DataArray
+ Rank histogram data array averaged over the provided dimensions
+ """
+
+ # unstack stacked time-dimension beforehand if required (time may be stacked for forecast
+ # data)
+ ground_truth = gt
+ if "time" in ground_truth.indexes:
+ if isinstance(ground_truth.indexes["time"], pd.MultiIndex):
+ ground_truth = ground_truth.reset_index("time")
+
+ prediction = p
+ if "time" in prediction.indexes:
+ if isinstance(prediction.indexes["time"], pd.MultiIndex):
+ prediction = prediction.reset_index("time")
+
+ # perform the stacking
+ obs_stacked = ground_truth.stack({"npoints": self._agg_dims})
+ fcst_stacked = prediction.stack({"npoints": self._agg_dims})
+
+ # add noise to data if desired
+ if add_noise:
+ if obs_stacked.chunks is None and fcst_stacked.chunks is None:
+ # underlying arrays are numpy arrays -> use numpy's native random generator
+ rng = np.random.default_rng()
+
+ obs_stacked += rng.random(size=obs_stacked.shape, dtype=np.float32) * noise_fac
+ fcst_stacked += rng.random(size=fcst_stacked.shape, dtype=np.float32) * noise_fac
+ else:
+ # underlying arrays are dask arrays -> use dask's random generator
+ obs_stacked += (
+ da.random.random(size=obs_stacked.shape, chunks=obs_stacked.chunks) * noise_fac
+ )
+ fcst_stacked += (
+ da.random.random(size=fcst_stacked.shape, chunks=fcst_stacked.chunks)
+ * noise_fac
+ )
+ # preserve the other coordinates
+ preserved_coords = {
+ c: obs_stacked[c].values
+ for c in obs_stacked.coords
+ if all(dim not in {self._ens_dim, "npoints"} for dim in obs_stacked[c].dims)
+ }
+
+ # calculate ranks for all data points
+ rank = (obs_stacked >= fcst_stacked).sum(dim=self._ens_dim)
+ # and count occurrence of rank values
+ rank.name = "rank" # name for xr.DataArray is required for histogram-method
+ rank_counts = histogram(
+ rank,
+ dim=["npoints"],
+ bins=np.arange(len(fcst_stacked[self._ens_dim]) + 2),
+ block_size=None if rank.chunks is None else "auto",
+ )
+
+ # Reattach preserved coordinates by broadcasting
+ for coord_name, coord_values in preserved_coords.items():
+ # Only keep unique values along npoints if necessary
+ if coord_name in rank_counts.coords:
+ continue
+ rank_counts = rank_counts.assign_coords({coord_name: coord_values})
+
+ # provide normalized rank counts if desired
+ if norm:
+ npoints = len(fcst_stacked["npoints"])
+ rank_counts = rank_counts / npoints
+
+ return rank_counts
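+
+ # Rank intuition for the computation above (toy numbers, illustration only):
+ # for a single observation obs = 0.7 and a 3-member ensemble [0.2, 0.5, 0.9],
+ # the rank is (obs >= members).sum() = 2, i.e. the observation falls between
+ # the second and third sorted member; a flat histogram of such ranks over
+ # many points indicates a well-calibrated ensemble.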
+
+ def calc_rank_histogram_xskillscore(self, p: xr.DataArray, gt: xr.DataArray) -> xr.DataArray:
+ """
+ Wrapper around rank_histogram-method by xskillscore-package.
+ See https://xskillscore.readthedocs.io/en/stable/api
+ Note: this version is found to be very slow. Use calc_rank_histogram alternatively.
+
+ Parameters
+ ----------
+ p: xr.DataArray
+ Forecast data array with ensemble dimension
+ gt: xr.DataArray
+ Ground truth data array
+ Returns
+ -------
+ xr.DataArray
+ Rank histogram data array averaged over the provided dimensions
+ """
+ rank_hist = xskillscore.rank_histogram(gt, p, member_dim=self._ens_dim, dim=self._agg_dims)
+
+ return rank_hist
+
+ @staticmethod
+ def calc_geo_spatial_diff(
+ scalar_field: xr.DataArray,
+ order: int = 1,
+ r_e: float = 6371.0e3,
+ dom_avg: bool = True,
+ ) -> xr.DataArray:
+ """
+ Calculates the amplitude of the gradient (order=1) or the Laplacian (order=2)
+ of a scalar field given on a regular, geographical grid
+ (i.e. dlambda = const. and dphi=const.)
+
+ Parameters
+ ----------
+ scalar_field: xr.DataArray
+ Scalar field as data array with latitude and longitude as coordinates
+ order: int
+ Order of spatial differential operator
+ r_e: float
+ Radius of the sphere
+ dom_avg: bool
+ Flag whether to return the domain-averaged amplitude or the amplitude at each
+ grid point
+
+ Returns
+ -------
+ xr.DataArray
+ the amplitude of the gradient/laplacian at each grid point or over the whole domain
+ (see dom_avg)
+ """
+ method = Scores.calc_geo_spatial_diff.__name__
+ # sanity checks
+ assert isinstance(scalar_field, xr.DataArray), (
+ f"Scalar_field of {method} must be a xarray DataArray."
+ )
+ assert order in [1, 2], f"Order for {method} must be either 1 or 2."
+
+ dims = list(scalar_field.dims)
+ lat_dims = ["rlat", "lat", "latitude"]
+ lon_dims = ["rlon", "lon", "longitude"]
+
+ def check_for_coords(coord_names_data, coord_names_expected):
+ # return the index and name of the first expected coordinate found in the data
+ for i, coord in enumerate(coord_names_expected):
+ if coord in coord_names_data:
+ return i, coord
+ expected_names = ",".join(coord_names_expected)
+ raise ValueError(
+ "Could not find one of the following coordinates in the "
+ + f"passed data array: {expected_names}"
+ )
+
+ _, lat_name = check_for_coords(dims, lat_dims)
+ _, lon_name = check_for_coords(dims, lon_dims)
+
+ lat, lon = (
+ np.deg2rad(scalar_field[lat_name]),
+ np.deg2rad(scalar_field[lon_name]),
+ )
+ dphi, dlambda = lat[1].values - lat[0].values, lon[1].values - lon[0].values
+
+ if order == 1:
+ dvar_dlambda = (
+ 1.0 / (r_e * np.cos(lat) * dlambda) * scalar_field.differentiate(lon_name)
+ )
+ dvar_dphi = 1.0 / (r_e * dphi) * scalar_field.differentiate(lat_name)
+ dvar_dlambda = dvar_dlambda.transpose(
+ *scalar_field.dims
+ ) # ensure that dimension ordering is not changed
+
+ var_diff_amplitude = np.sqrt(dvar_dlambda**2 + dvar_dphi**2)
+ if dom_avg:
+ var_diff_amplitude = var_diff_amplitude.mean(dim=[lat_name, lon_name])
+ else:
+ raise ValueError(f"Second-order differentiation is not implemented in {method} yet.")
+
+ return var_diff_amplitude
diff --git a/packages/evaluate/src/weathergen/evaluate/score_utils.py b/packages/evaluate/src/weathergen/evaluate/score_utils.py
new file mode 100644
index 000000000..a6339d009
--- /dev/null
+++ b/packages/evaluate/src/weathergen/evaluate/score_utils.py
@@ -0,0 +1,134 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+ +import logging +from dataclasses import dataclass +from typing import Any, ClassVar + +import xarray as xr +from omegaconf.listconfig import ListConfig + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +def to_list(obj: Any) -> list: + """ + Convert given object to list if obj is not already a list. Sets are also transformed to a list. + + Parameters + ---------- + obj : Any + The object to transform into a list. + Returns + ------- + list + A list containing the object, or the object itself if it was already a list. + """ + if isinstance(obj, set | tuple | ListConfig): + obj = list(obj) + elif not isinstance(obj, list): + obj = [obj] + return obj + + +class RegionLibrary: + """ + Predefined bounding boxes for known regions. + """ + + REGIONS: ClassVar[dict[str, tuple[float, float, float, float]]] = { + "global": (-90.0, 90.0, -180.0, 180.0), + "nhem": (0.0, 90.0, -180.0, 180.0), + "shem": (-90.0, 0.0, -180.0, 180.0), + "tropics": (-30.0, 30.0, -180.0, 180.0), + } + + +@dataclass(frozen=True) +class RegionBoundingBox: + lat_min: float + lat_max: float + lon_min: float + lon_max: float + + def __post_init__(self): + """Validate the bounding box coordinates.""" + self.validate() + + def validate(self): + """Validate the bounding box coordinates.""" + if not (-90 <= self.lat_min <= 90 and -90 <= self.lat_max <= 90): + raise ValueError( + f"Latitude bounds must be between -90 and 90. Got: {self.lat_min}, {self.lat_max}" + ) + if not (-180 <= self.lon_min <= 180 and -180 <= self.lon_max <= 180): + raise ValueError( + "Longitude bounds must be between -180 and 180. " + + f"Got: {self.lon_min}, {self.lon_max}" + ) + if self.lat_min >= self.lat_max: + raise ValueError( + f"Latitude minimum must be less than maximum. Got: {self.lat_min}, {self.lat_max}" + ) + if self.lon_min >= self.lon_max: + raise ValueError( + f"Longitude minimum must be less than maximum. Got: {self.lon_min}, {self.lon_max}" + ) + + def contains(self, lat: float, lon: float) -> bool: + """Check if a lat/lon point is within the bounding box.""" + return (self.lat_min <= lat <= self.lat_max) and (self.lon_min <= lon <= self.lon_max) + + def apply_mask( + self, + data: xr.Dataset | xr.DataArray, + lat_name: str = "lat", + lon_name: str = "lon", + data_dim: str = "ipoint", + ) -> xr.Dataset | xr.DataArray: + """Filter Dataset or DataArray by spatial bounding box on 'ipoint' dimension. + Parameters + ---------- + data : + The data to filter. + lat_name: + Name of the latitude coordinate in the data. + lon_name: + Name of the longitude coordinate in the data. + data_dim: + Name of the dimension that contains the lat/lon coordinates. + + Returns + ------- + Filtered data with only points within the bounding box. + """ + # lat/lon coordinates should be 1D and aligned with ipoint + lat = data[lat_name] + lon = data[lon_name] + + mask = ( + (lat >= self.lat_min) + & (lat <= self.lat_max) + & (lon >= self.lon_min) + & (lon <= self.lon_max) + ) + + return data.sel({data_dim: mask}) + + @classmethod + def from_region_name(cls, region: str) -> "RegionBoundingBox": + region = region.lower() + try: + return cls(*RegionLibrary.REGIONS[region]) + except KeyError as err: + raise ValueError( + f"Region '{region}' is not supported. 
" + f"Available regions: {', '.join(RegionLibrary.REGIONS.keys())}" + ) from err diff --git a/packages/evaluate/src/weathergen/evaluate/utils.py b/packages/evaluate/src/weathergen/evaluate/utils.py new file mode 100644 index 000000000..09ff2e9b7 --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/utils.py @@ -0,0 +1,647 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +import json +import logging +from pathlib import Path + +import numpy as np +import omegaconf as oc +import xarray as xr +from tqdm import tqdm + +from weathergen.evaluate.clim_utils import get_climatology +from weathergen.evaluate.io_reader import Reader +from weathergen.evaluate.plot_utils import ( + bar_plot_metric_region, + plot_metric_region, + score_card_metric_region, +) +from weathergen.evaluate.plotter import BarPlots, LinePlots, Plotter, ScoreCards +from weathergen.evaluate.score import VerifiedData, get_score + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +def get_next_data(fstep, da_preds, da_tars, fsteps): + """ + Get the next forecast step data for the given forecast step. + """ + fstep_idx = fsteps.index(fstep) + # Get the next forecast step + next_fstep = fsteps[fstep_idx + 1] if fstep_idx + 1 < len(fsteps) else None + if next_fstep is not None: + preds_next = da_preds.get(next_fstep, None) + tars_next = da_tars.get(next_fstep, None) + else: + preds_next = None + tars_next = None + + return preds_next, tars_next + + +def calc_scores_per_stream( + reader: Reader, + stream: str, + region: str, + metrics: list[str], + plot_score_maps: bool = False, +) -> tuple[xr.DataArray, xr.DataArray]: + """ + Calculate scores for a given run and stream using the specified metrics. + + Parameters + ---------- + reader : Reader + Reader object containing all info about a particular run. + stream : + Stream name to calculate scores for. + region : + Region name to calculate scores for. + metrics : + List of metric names to calculate. + plot_score_maps : + When it is True and the stream is on a regular grid the scores are + recomputed as a function of the "ipoint" and plotted on a 2D scatter map. + NOTE: the scores are averaged over the "sample" dimension and for most + of the metrics this does not give the same results as averaging over + the "ipoint" dimension. + Returns + ------- + Tuple of xarray DataArray containing the scores and the number of points per sample. 
+    """
+
+    _logger.info(f"RUN {reader.run_id} - {stream}: Calculating scores for metrics {metrics}...")
+    if plot_score_maps:
+        _logger.info(f"RUN {reader.run_id} - {stream}: Plotting scores is enabled.")
+
+        map_dir = reader.runplot_dir / "plots" / stream / "score_maps"
+        map_dir.mkdir(parents=True, exist_ok=True)
+
+        _logger.info(f"RUN {reader.run_id} - {stream}: Saving plotted scores to {map_dir}")
+
+    available_data = reader.check_availability(stream, mode="evaluation")
+
+    fsteps = available_data.fsteps
+    samples = available_data.samples
+    channels = available_data.channels
+    ensemble = available_data.ensemble
+    is_regular = reader.is_regular(stream)
+    group_by_coord = None if is_regular else "sample"
+
+    output_data = reader.get_data(
+        stream,
+        region=region,
+        fsteps=fsteps,
+        samples=samples,
+        channels=channels,
+        ensemble=ensemble,
+        return_counts=True,
+    )
+
+    da_preds = output_data.prediction
+    da_tars = output_data.target
+    points_per_sample = output_data.points_per_sample
+
+    aligned_clim_data = get_climatology(reader, da_tars, stream)
+
+    metric_stream = xr.DataArray(
+        np.full(
+            (len(samples), len(fsteps), len(channels), len(metrics), len(ensemble)),
+            np.nan,
+        ),
+        coords={
+            "sample": samples,
+            "forecast_step": fsteps,
+            "channel": channels,
+            "metric": metrics,
+            "ens": ensemble,
+        },
+    )
+
+    for (fstep, tars), (_, preds) in zip(da_tars.items(), da_preds.items(), strict=False):
+        if preds.ipoint.size == 0:
+            _logger.warning(
+                f"No data for stream {stream} at fstep {fstep} in region {region}. Skipping."
+            )
+            continue
+
+        _logger.debug(f"Verifying data for stream {stream}...")
+
+        preds_next, tars_next = get_next_data(fstep, da_preds, da_tars, fsteps)
+
+        climatology = aligned_clim_data[fstep] if aligned_clim_data else None
+        score_data = VerifiedData(preds, tars, preds_next, tars_next, climatology)
+        # Build up computation graphs for all metrics
+        _logger.debug(f"Build computation graphs for metrics for stream {stream}...")
+
+        # Keep only metrics whose score could be computed (get_score may return None);
+        # collect the names in the same loop so scores and names stay aligned.
+        valid_scores = []
+        valid_metric_names = []
+        for metric in metrics:
+            score = get_score(
+                score_data,
+                metric,
+                agg_dims="ipoint",
+                group_by_coord=group_by_coord,
+            )
+            if score is not None:
+                valid_scores.append(score)
+                valid_metric_names.append(metric)
+
+        combined_metrics = xr.concat(valid_scores, dim="metric")
+        combined_metrics = combined_metrics.assign_coords(metric=valid_metric_names)
+
+        _logger.debug(f"Running computation of metrics for stream {stream}...")
+        combined_metrics = combined_metrics.compute()
+
+        for coord in ["channel", "sample", "ens"]:
+            combined_metrics = scalar_coord_to_dim(combined_metrics, coord)
+
+        assert int(combined_metrics.forecast_step) == int(fstep), (
+            "Different steps in data and metrics. Please check."
+        )
+
+        criteria = {
+            "forecast_step": int(combined_metrics.forecast_step),
+            "sample": combined_metrics.sample,
+            "channel": combined_metrics.channel,
+            "metric": combined_metrics.metric,
+        }
+
+        if "ens" in combined_metrics.dims:
+            criteria["ens"] = combined_metrics.ens
+        metric_stream.loc[criteria] = combined_metrics
+
+        #########
+
+        if is_regular and plot_score_maps:
+            _logger.info(f"Plotting scores on a map {stream} - forecast step: {fstep}...")
+            _plot_score_maps_per_stream(reader, map_dir, stream, region, score_data, metrics, fstep)
+
+    _logger.info(f"Scores for run {reader.run_id} - {stream} calculated successfully.")
+
+    return metric_stream, points_per_sample
+
+
+def _plot_score_maps_per_stream(
+    reader: Reader,
+    map_dir: str,
+    stream: str,
+    region: str,
+    score_data: VerifiedData,
+    metrics: list[str],
+    fstep: int,
+) -> None:
+    """Plot 2D score maps for all metrics and channels.
+
+    Parameters
+    ----------
+    reader: Reader
+        Reader object containing all info about the run
+    map_dir: str
+        Directory where the plots are saved.
+    stream: str
+        Stream name to plot score maps for.
+    region :
+        Region name to plot score maps for.
+    score_data: VerifiedData
+        Prediction and target stored in the data class.
+    metrics: list[str]
+        List of all metrics to plot.
+    fstep: int
+        Forecast step to plot.
+
+    Returns
+    -------
+    None
+    """
+
+    cfg = reader.global_plotting_options
+
+    # TODO: add support for climatology-dependent metrics as well
+
+    plotter = Plotter(
+        {
+            "image_format": cfg.get("image_format", "png"),
+            "dpi_val": cfg.get("dpi_val", 300),
+            "fig_size": cfg.get("fig_size", (8, 10)),
+        },
+        reader.runplot_dir,
+        stream,
+    )
+
+    preds = score_data.prediction
+
+    plot_metrics = xr.concat(
+        [get_score(score_data, m, agg_dims="sample") for m in metrics], dim="metric"
+    )
+
+    plot_metrics = plot_metrics.assign_coords(
+        lat=preds.lat.reset_coords(drop=True),
+        lon=preds.lon.reset_coords(drop=True),
+        metric=metrics,
+    ).compute()
+
+    if "ens" in preds.dims:
+        plot_metrics["ens"] = preds.ens
+
+    has_ens = "ens" in plot_metrics.coords
+    ens_values = plot_metrics.coords["ens"].values if has_ens else [None]
+
+    for metric in plot_metrics.coords["metric"].values:
+        for ens_val in tqdm(ens_values, f"Plotting metric - {metric}"):
+            tag = f"score_maps_{region}_{metric}_fstep_{fstep}" + (
+                f"_ens_{ens_val}" if ens_val is not None else ""
+            )
+            for channel in plot_metrics.coords["channel"].values:
+                sel = {"metric": metric, "channel": channel}
+                if ens_val is not None:
+                    sel["ens"] = ens_val
+
+                data = plot_metrics.sel(**sel).squeeze()
+                title = f"{metric} - {channel}: fstep {fstep}" + (
+                    f", ens {ens_val}" if ens_val is not None else ""
+                )
+                plotter.scatter_plot(data, map_dir, channel, tag=tag, title=title)
+
+
+def plot_data(reader: Reader, stream: str, global_plotting_opts: dict) -> None:
+    """
+    Plot the data for a given run and stream.
+
+    Parameters
+    ----------
+    reader: Reader
+        Reader object containing all info about the run
+    stream: str
+        Stream name to plot data for.
+ global_plotting_opts: dict + Dictionary containing all plotting options that apply globally to all run_ids + """ + run_id = reader.run_id + + # get stream dict from evaluation config (assumed to be part of cfg at this point) + stream_cfg = reader.get_stream(stream) + + # handle plotting settings + plot_settings = stream_cfg.get("plotting", {}) + + # return early if no plotting is requested + if not ( + plot_settings + and ( + plot_settings.get("plot_maps", False) + or plot_settings.get("plot_histograms", False) + or plot_settings.get("plot_animations", False) + ) + ): + return + + plotter_cfg = { + "image_format": global_plotting_opts.get("image_format", "png"), + "dpi_val": global_plotting_opts.get("dpi_val", 300), + "fig_size": global_plotting_opts.get("fig_size", (8, 10)), + "fps": global_plotting_opts.get("fps", 2), + "plot_subtimesteps": reader.get_inference_stream_attr(stream, "tokenize_spacetime", False), + } + + plotter = Plotter(plotter_cfg, reader.runplot_dir) + + available_data = reader.check_availability(stream, mode="plotting") + + # Check if maps should be plotted and handle configuration if provided + plot_maps = plot_settings.get("plot_maps", False) + if not isinstance(plot_maps, bool): + raise TypeError("plot_maps must be a boolean.") + + plot_target = plot_settings.get("plot_target", True) + if not isinstance(plot_target, bool): + raise TypeError("plot_target must be a boolean.") + + # Check if histograms should be plotted + plot_histograms = plot_settings.get("plot_histograms", False) + if not isinstance(plot_histograms, bool): + raise TypeError("plot_histograms must be a boolean.") + + plot_animations = plot_settings.get("plot_animations", False) + if not isinstance(plot_animations, bool): + raise TypeError("plot_animations must be a boolean.") + + model_output = reader.get_data( + stream, + samples=available_data.samples, + fsteps=available_data.fsteps, + channels=available_data.channels, + ensemble=available_data.ensemble, + ) + + da_tars = model_output.target + da_preds = model_output.prediction + + if not da_tars: + _logger.info(f"Skipping Plot Data for {stream}. 
Targets are empty.") + return + + # get common ranges across all run_ids + if not isinstance(global_plotting_opts.get(stream), oc.DictConfig): + global_plotting_opts[stream] = oc.DictConfig({}) + maps_config = common_ranges( + da_tars, da_preds, available_data.channels, global_plotting_opts[stream] + ) + + for (fstep, tars), (_, preds) in zip(da_tars.items(), da_preds.items(), strict=False): + plot_chs = list(np.atleast_1d(tars.channel.values)) + plot_samples = list(np.unique(tars.sample.values)) + + for sample in tqdm(plot_samples, desc=f"Plotting {run_id} - {stream} - fstep {fstep}"): + data_selection = { + "sample": sample, + "stream": stream, + "forecast_step": fstep, + } + + if plot_maps: + if plot_target: + plotter.create_maps_per_sample( + tars, plot_chs, data_selection, "targets", maps_config + ) + for ens in available_data.ensemble: + preds_ens = ( + preds.sel(ens=ens) if "ens" in preds.dims and ens != "mean" else preds + ) + preds_tag = "" if "ens" not in preds.dims else f"ens_{ens}" + preds_name = "_".join( + filter(None, ["preds", preds_tag]) + ) # avoid trailing underscore + + plotter.create_maps_per_sample( + preds_ens, plot_chs, data_selection, preds_name, maps_config + ) + + if plot_histograms: + plotter.create_histograms_per_sample( + tars, preds_ens, plot_chs, data_selection, preds_tag + ) + + plotter = plotter.clean_data_selection() + + if plot_animations: + plot_fsteps = da_tars.keys() + for ens in available_data.ensemble: + preds_name = "preds" if "ens" not in preds.dims else f"preds_ens_{ens}" + plotter.animation(plot_samples, plot_fsteps, plot_chs, data_selection, preds_name) + if plot_target: + plotter.animation(plot_samples, plot_fsteps, plot_chs, data_selection, "targets") + + return + + +def metric_list_to_json( + reader: Reader, + metrics_list: list[xr.DataArray], + npoints_sample_list: list[xr.DataArray], + streams: list[str], + region: str, +): + """ + Write the evaluation results collected in a list of xarray DataArrays for the metrics + to stream- and metric-specific JSON files. + + Parameters + ---------- + reader: + Reader object containing all info about the run_id. + metrics_list : + Metrics per stream. + npoints_sample_list : + Number of points per sample per stream. + streams : + Stream names. + region : + Region name. + metric_dir : + Output directory. + run_id : + Identifier of the inference run. + mini_epoch : + Mini_epoch number. + """ + assert len(metrics_list) == len(npoints_sample_list) == len(streams), ( + "The lengths of metrics_list, npoints_sample_list, and streams must be the same." + ) + + reader.metrics_dir.mkdir(parents=True, exist_ok=True) + + for s_idx, stream in enumerate(streams): + metrics_stream, npoints_sample_stream = ( + metrics_list[s_idx], + npoints_sample_list[s_idx], + ) + + for metric in metrics_stream.coords["metric"].values: + metric_now = metrics_stream.sel(metric=metric) + + # Save as individual DataArray, not Dataset + metric_now.attrs["npoints_per_sample"] = npoints_sample_stream.values.tolist() + metric_dict = metric_now.to_dict() + + # Match the expected filename pattern + save_path = ( + reader.metrics_dir + / f"{reader.run_id}_{stream}_{region}_{metric}_chkpt{reader.mini_epoch:05d}.json" + ) + + _logger.info(f"Saving results to {save_path}") + with open(save_path, "w") as f: + json.dump(metric_dict, f, indent=4) + + _logger.info( + f"Saved all results of inference run {reader.run_id} - mini_epoch {reader.mini_epoch:d} " + f"successfully to {reader.metrics_dir}." 
+ ) + + +def plot_summary(cfg: dict, scores_dict: dict, summary_dir: Path): + """ + Plot summary of the evaluation results. + This function is a placeholder for future implementation. + + Parameters + ---------- + cfg : + Configuration dictionary containing all information for the evaluation. + scores_dict : + Dictionary containing scores for each metric and stream. + """ + _logger.info("Plotting summary of evaluation results...") + + runs = cfg.run_ids + metrics = cfg.evaluation.metrics + print_summary = cfg.evaluation.get("print_summary", False) + regions = cfg.evaluation.get("regions", ["global"]) + plt_opt = cfg.get("global_plotting_options", {}) + eval_opt = cfg.get("evaluation", {}) + + plot_cfg = { + "image_format": plt_opt.get("image_format", "png"), + "dpi_val": plt_opt.get("dpi_val", 300), + "fig_size": plt_opt.get("fig_size", (8, 10)), + "log_scale": eval_opt.get("log_scale", False), + "add_grid": eval_opt.get("add_grid", False), + "plot_ensemble": eval_opt.get("plot_ensemble", False), + } + + plotter = LinePlots(plot_cfg, summary_dir) + sc_plotter = ScoreCards(plot_cfg, summary_dir) + br_plotter = BarPlots(plot_cfg, summary_dir) + for region in regions: + for metric in metrics: + plot_metric_region(metric, region, runs, scores_dict, plotter, print_summary) + if eval_opt.get("score_cards", False): + score_card_metric_region(metric, region, runs, scores_dict, sc_plotter) + if eval_opt.get("bar_plots", False): + bar_plot_metric_region(metric, region, runs, scores_dict, br_plotter) + + +############# Utility functions ############ + + +def common_ranges( + data_tars: list[dict], + data_preds: list[dict], + plot_chs: list[str], + maps_config: oc.dictconfig.DictConfig, +) -> oc.dictconfig.DictConfig: + """ + Calculate common ranges per stream and variables. + + Parameters + ---------- + data_tars : + the (target) list of dictionaries with the forecasteps and respective xarray + data_preds : + the (prediction) list of dictionaries with the forecasteps and respective xarray + plot_chs: + the variables to be plotted as given by the configuration file + maps_config: + the global plotting configuration + Returns + ------- + maps_config : + the global plotting configuration with the ranges added and included for each variable (and + for each stream). + """ + for var in plot_chs: + if var in maps_config: + if not isinstance(maps_config[var].get("vmax"), (int | float)): + list_max = calc_bounds(data_tars, data_preds, var, "max") + list_max = np.concatenate([arr.flatten() for arr in list_max]).tolist() + maps_config[var].update({"vmax": float(max(list_max))}) + + if not isinstance(maps_config[var].get("vmin"), (int | float)): + list_min = calc_bounds(data_tars, data_preds, var, "min") + list_min = np.concatenate([arr.flatten() for arr in list_min]).tolist() + maps_config[var].update({"vmin": float(min(list_min))}) + + else: + list_max = calc_bounds(data_tars, data_preds, var, "max") + list_max = np.concatenate([arr.flatten() for arr in list_max]).tolist() + list_min = calc_bounds(data_tars, data_preds, var, "min") + list_min = np.concatenate([arr.flatten() for arr in list_min]).tolist() + + maps_config.update({var: {"vmax": float(max(list_max)), "vmin": float(min(list_min))}}) + + return maps_config + + +def calc_val(x: xr.DataArray, bound: str) -> list[float]: + """ + Calculate the maximum or minimum value per variable for all forecasteps. 
+ Parameters + ---------- + x : + the xarray DataArray with the forecasteps and respective values + bound : + the bound to be calculated, either "max" or "min" + Returns + ------- + a list with the maximum or minimum values for a specific variable. + """ + if bound == "max": + return x.max(dim=("ipoint")).values + elif bound == "min": + return x.min(dim=("ipoint")).values + else: + raise ValueError("bound must be either 'max' or 'min'") + + +def calc_bounds( + data_tars, + data_preds, + var, + bound, +): + """ + Calculate the minimum and maximum values per variable for all forecasteps for both targets and + predictions + + Parameters + ---------- + data_tars : + the (target) list of dictionaries with the forecasteps and respective xarray + data_preds : + the (prediction) list of dictionaries with the forecasteps and respective xarray + Returns + ------- + list_bound : + a list with the maximum or minimum values for a specific variable. + """ + list_bound = [] + for da_tars, da_preds in zip(data_tars.values(), data_preds.values(), strict=False): + list_bound.extend( + ( + calc_val(da_tars.where(da_tars.channel == var, drop=True), bound), + calc_val(da_preds.where(da_preds.channel == var, drop=True), bound), + ) + ) + + return list_bound + + +def scalar_coord_to_dim(da: xr.DataArray, name: str, axis: int = -1) -> xr.DataArray: + """ + Convert a scalar coordinate to a dimension in an xarray DataArray. + If the coordinate is already a dimension, it is returned unchanged. + + Parameters + ---------- + da : xarray.DataArray + The DataArray to modify. + name : str + The name of the coordinate to convert. + axis : int, optional + The axis along which to expand the dimension. Default is -1 (last axis). + Returns + ------- + xarray.DataArray + The modified DataArray with the scalar coordinate converted to a dimension. + """ + if name in da.dims: + return da # already a dimension + if name in da.coords and da.coords[name].ndim == 0: + val = da.coords[name].item() + da = da.drop_vars(name) + da = da.expand_dims({name: [val]}, axis=axis) + return da diff --git a/packages/metrics/pyproject.toml b/packages/metrics/pyproject.toml new file mode 100644 index 000000000..ba54aa4a1 --- /dev/null +++ b/packages/metrics/pyproject.toml @@ -0,0 +1,102 @@ +[project] +name = "weathergen-metrics" +version = "0.1.0" +description = "The WeatherGenerator Machine Learning Earth System Model" +readme = "../../README.md" +requires-python = ">=3.12,<3.13" +dependencies = [ + "mlflow-skinny", + "weathergen-common", +] + +[dependency-groups] +dev = [ + "pytest~=8.3.5", + "pytest-mock>=3.14.1", + "ruff==0.9.7", + "pyrefly==0.36.0", +] + + +[tool.pyrefly] +project-includes = ["src/"] +project-excludes = [ +] + +[tool.pyrefly.errors] +bad-argument-type = false +unsupported-operation = false +missing-attribute = false +no-matching-overload = false +bad-context-manager = false + +# To do: +bad-assignment = false +bad-return = false +index-error = false +not-iterable = false +not-callable = false + + + + +# The linting configuration +[tool.ruff] + +# Wide rows +line-length = 100 + +[tool.ruff.lint] +# All disabled until the code is formatted. +select = [ + # pycodestyle + "E", + # Pyflakes + "F", + # pyupgrade + "UP", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + # isort + "I", + # Banned imports + "TID", + # Naming conventions + "N", + # print + "T201" +] + +# These rules are sensible and should be enabled at a later stage. 
+ignore = [ + # "B006", + "B011", + "UP008", + "SIM117", + "SIM118", + "SIM102", + "SIM401", + # To ignore, not relevant for us + "SIM108", # in case additional norm layer supports are added in future + "N817", # we use heavy acronyms, e.g., allowing 'import LongModuleName as LMN' (LMN is accepted) + "E731", # overly restrictive and less readable code + "N812", # prevents us following the convention for importing torch.nn.functional as F +] + +[tool.ruff.lint.flake8-tidy-imports.banned-api] +"numpy.ndarray".msg = "Do not use 'ndarray' to describe a numpy array type, it is a function. Use numpy.typing.NDArray or numpy.typing.NDArray[np.float32] for example" + +[tool.ruff.format] +# Use Unix `\n` line endings for all files +line-ending = "lf" + + + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/weathergen"] diff --git a/packages/metrics/src/weathergen/metrics/__init__.py b/packages/metrics/src/weathergen/metrics/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/metrics/src/weathergen/metrics/mlflow_utils.py b/packages/metrics/src/weathergen/metrics/mlflow_utils.py new file mode 100644 index 000000000..27a8bec8e --- /dev/null +++ b/packages/metrics/src/weathergen/metrics/mlflow_utils.py @@ -0,0 +1,176 @@ +import logging +import os + +import mlflow +import mlflow.client +import numpy as np +from mlflow.client import MlflowClient +from mlflow.entities.metric import Metric +from mlflow.entities.run import Run +from xarray import DataArray + +from weathergen.common.config import Config +from weathergen.common.platform_env import get_platform_env + +_logger = logging.getLogger(__name__) + +project_name = "WeatherGenerator" +project_lifecycle = "dev" + +_platform_env = get_platform_env() + + +class MlFlowUpload: + tracking_uri = "databricks" + registry_uri = "databricks-uc" + experiment_name = "/Shared/weathergen-dev/core-model/defaultExperiment" + + experiment_tags = { + "project": project_name, + "lifecycle": project_lifecycle, + } + + @classmethod + def run_tags(cls, run_id: str, phase: str, from_run_id: str | None) -> dict[str, str]: + """ + Returns the tags to be set for a run. + """ + dct = { + "lifecycle": project_lifecycle, + "hpc": _platform_env.get_hpc() or "unknown", + "run_id": run_id, + "stage": phase, + "project": project_name, + "uploader": _platform_env.get_hpc_user() or "unknown", + "completion_status": "success", + } + if from_run_id: + dct["from_run_id"] = from_run_id + return dct + + +def log_metrics( + metrics: list[dict[str, float | int]], + mlflow_client: MlflowClient, + mlflow_run_id: str, +): + """ + Logs the metrics to MLFlow. + """ + if not metrics: + return + + # Converts teh metrics to a single batch of metrics object. This limits the IO and DB calls + def _convert_to_mlflow_metric(dct): + # Convert the metric to a mlflow metric + ts = int(dct.get("weathergen.timestamp", 0)) + step = int(dct.get("weathergen.step", 0)) + return [ + Metric(key=k, value=v, timestamp=ts, step=step) + for k, v in dct.items() + if not k.startswith("weathergen.") + ] + + mlflow_metrics = [met for dct in metrics for met in _convert_to_mlflow_metric(dct)] + mlflow_client.log_batch( + run_id=mlflow_run_id, + metrics=mlflow_metrics, + ) + + +def log_scores( + metrics_dict: dict[str, dict[str, dict[str, DataArray]]], + mlflow_client: MlflowClient, + mlflow_run_id: str, + channels_set: list[str], + x_dim="forecast_step", +): + """ + Logs the evaluation scores to MLFlow. 
+    metrics_dict: metric -> region -> stream -> DataArray
+    """
+
+    ts = 0
+
+    mlflow_metrics = []
+    for metric, regions_dict in metrics_dict.items():
+        for region, streams_dict in regions_dict.items():
+            for stream, data in streams_dict.items():
+                for ch in channels_set:
+                    # skip if channel is missing or contains NaN
+                    if ch not in np.atleast_1d(data.channel.values) or data.isnull().all():
+                        _logger.info(
+                            f"Skipping channel {ch} for {metric} - {region} - {stream} "
+                            "due to missing data."
+                        )
+                        continue
+                    _logger.info(f"Collecting data for {metric} - {region} - {stream} - {ch}.")
+                    data_ch = data.sel(channel=ch)
+                    non_zero_dims = [
+                        dim for dim in data_ch.dims if dim != x_dim and data_ch[dim].shape[0] > 1
+                    ]
+                    if "ens" in non_zero_dims:
+                        _logger.info("Uploading ensembles is not yet implemented")
+                    else:
+                        if non_zero_dims:
+                            _logger.info(
+                                f"LinePlot:: Found multiple entries for dimensions: {non_zero_dims}"
+                                + ". Averaging..."
+                            )
+                        averaged = data_ch.mean(
+                            dim=[dim for dim in data_ch.dims if dim != x_dim], skipna=True
+                        ).sortby(x_dim)
+                        label = f"score.{region}.{metric}.{stream}.{ch}"
+
+                        mlflow_metrics.append(
+                            [
+                                Metric(key=label, value=y, timestamp=ts, step=int(x))
+                                for x, y in zip(
+                                    averaged[x_dim].values, averaged.values, strict=False
+                                )
+                            ]
+                        )
+
+    all_metrics = [met for batch in mlflow_metrics for met in batch]
+    _logger.info(f"Logging total of {len(all_metrics)} metrics to MLFlow.")
+    mlflow_client.log_batch(
+        run_id=mlflow_run_id,
+        metrics=all_metrics,
+    )
+
+
+def setup_mlflow(private_config: Config) -> MlflowClient:
+    os.environ["DATABRICKS_HOST"] = private_config["mlflow"]["tracking_uri"]
+    os.environ["DATABRICKS_TOKEN"] = private_config["secrets"]["mlflow_token"]
+    mlflow.set_tracking_uri(MlFlowUpload.tracking_uri)
+    mlflow.set_registry_uri(MlFlowUpload.registry_uri)
+    mlflow_client = mlflow.client.MlflowClient(
+        tracking_uri=MlFlowUpload.tracking_uri, registry_uri=MlFlowUpload.registry_uri
+    )
+    return mlflow_client
+
+
+def get_or_create_mlflow_parent_run(mlflow_client: MlflowClient, run_id: str) -> Run:
+    exp_name = MlFlowUpload.experiment_name
+    _logger.info(f"Setting experiment name to {exp_name}: host: {os.environ['DATABRICKS_HOST']}")
+    exp = mlflow.set_experiment(exp_name)
+    _logger.info(f"Experiment {exp_name} created with ID {exp.experiment_id}: {exp}")
+    runs = mlflow_client.search_runs(
+        experiment_ids=[exp.experiment_id],
+        filter_string=f"tags.run_id='{run_id}' AND tags.stage='unknown'",
+    )
+    if len(runs) == 0:
+        _logger.info(f"No existing parent run found for run_id {run_id}, creating new run")
+        return mlflow_client.create_run(
+            experiment_id=exp.experiment_id,
+            tags=MlFlowUpload.run_tags(run_id, "unknown", from_run_id=None),
+            run_name=run_id,
+        )
+    if len(runs) > 1:
+        _logger.warning(
+            (
+                f"Multiple existing parent runs found for run_id {run_id},"
+                f" using the first one: {runs[0].info.run_id}"
+            )
+        )
+    return runs[0]
diff --git a/packages/readers_extra/pyproject.toml b/packages/readers_extra/pyproject.toml
new file mode 100644
index 000000000..21179f146
--- /dev/null
+++ b/packages/readers_extra/pyproject.toml
@@ -0,0 +1,106 @@
+[project]
+name = "weathergen-readers-extra"
+version = "0.1.0"
+description = "The WeatherGenerator Machine Learning Earth System Model"
+readme = "../../README.md"
+requires-python = ">=3.12,<3.13"
+# TODO: incomplete: it also implicitly depends on the main project for the base classes
+# There is currently a circular dependency readers-extra => root => readers-extra
+# It needs to be broken by moving the 
base class of the readers code to its own package. +dependencies = [ + "xarray", + "zarr", + "weathergen-common", +] + +[dependency-groups] +dev = [ + "pytest~=8.3.5", + "pytest-mock>=3.14.1", + "ruff==0.9.7", + "pyrefly==0.36.0", +] + + +[tool.pyrefly] +project-includes = ["src/"] +project-excludes = [ +] + +[tool.pyrefly.errors] +bad-argument-type = false +unsupported-operation = false +missing-attribute = false +no-matching-overload = false +bad-context-manager = false + +# To do: +bad-assignment = false +bad-return = false +index-error = false +not-iterable = false +not-callable = false + + + + +# The linting configuration +[tool.ruff] + +# Wide rows +line-length = 100 + +[tool.ruff.lint] +# All disabled until the code is formatted. +select = [ + # pycodestyle + "E", + # Pyflakes + "F", + # pyupgrade + "UP", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + # isort + "I", + # Banned imports + "TID", + # Naming conventions + "N", + # print + "T201" +] + +# These rules are sensible and should be enabled at a later stage. +ignore = [ + # "B006", + "B011", + "UP008", + "SIM117", + "SIM118", + "SIM102", + "SIM401", + # To ignore, not relevant for us + "SIM108", # in case additional norm layer supports are added in future + "N817", # we use heavy acronyms, e.g., allowing 'import LongModuleName as LMN' (LMN is accepted) + "E731", # overly restrictive and less readable code + "N812", # prevents us following the convention for importing torch.nn.functional as F +] + +[tool.ruff.lint.flake8-tidy-imports.banned-api] +"numpy.ndarray".msg = "Do not use 'ndarray' to describe a numpy array type, it is a function. Use numpy.typing.NDArray or numpy.typing.NDArray[np.float32] for example" + +[tool.ruff.format] +# Use Unix `\n` line endings for all files +line-ending = "lf" + + + +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/weathergen"] diff --git a/packages/readers_extra/src/weathergen/readers_extra/__init__.py b/packages/readers_extra/src/weathergen/readers_extra/__init__.py new file mode 100644 index 000000000..df6164120 --- /dev/null +++ b/packages/readers_extra/src/weathergen/readers_extra/__init__.py @@ -0,0 +1,7 @@ +""" +readers-extra package. + +Contains additional data readers for the WeatherGenerator project. + +This code is not as stable and tested as the main readers. +""" diff --git a/packages/readers_extra/src/weathergen/readers_extra/data_reader_eobs.py b/packages/readers_extra/src/weathergen/readers_extra/data_reader_eobs.py new file mode 100644 index 000000000..4f0157792 --- /dev/null +++ b/packages/readers_extra/src/weathergen/readers_extra/data_reader_eobs.py @@ -0,0 +1,415 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
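# The reader below defers all file I/O out of __init__ into a _lazy_init() that
# runs once per dataloader worker, so each forked worker opens its own Zarr
# handle. A minimal, self-contained sketch of that pattern; the LazyZarrReader
# class here is illustrative only and not part of the patch.
import xarray as xr


class LazyZarrReader:
    def __init__(self, filename: str) -> None:
        # store configuration only; no file handles are created here
        self._filename = filename
        self._ds: xr.Dataset | None = None
        self._initialized = False

    def _lazy_init(self) -> None:
        # runs once per worker process, on first data access
        if self._initialized:
            return
        self._ds = xr.open_zarr(self._filename, consolidated=True)
        self._initialized = True

    def length(self) -> int:
        self._lazy_init()
        return 0 if self._ds is None else self._ds.sizes.get("time", 0)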
+ +import logging +from pathlib import Path +from typing import override + +import numpy as np +import xarray as xr +from numpy.typing import NDArray + +from weathergen.datasets.data_reader_base import ( + DataReaderTimestep, + ReaderData, + TimeWindowHandler, + TIndex, + check_reader_data, + str_to_timedelta, +) + +_logger = logging.getLogger(__name__) + + +# TODO make this datareader works with multiple datasets in ZARR format +class DataReaderEObs(DataReaderTimestep): + """ + Data reader for gridded Zarr datasets with regular lat/lon structure. + + This reader handles datasets stored as Zarr with dimensions (time, latitude, longitude) + and converts the gridded data to point-wise format required by the framework. + + The reader implements lazy initialization to work efficiently with multiple dataloader workers. + """ + + def __init__( + self, + tw_handler: TimeWindowHandler, + filename: Path, + stream_info: dict, + ) -> None: + """ + Construct data reader for gridded Zarr dataset. + + Parameters + ---------- + tw_handler : TimeWindowHandler + Handler for time windows + filename : Path + Path to the Zarr dataset + stream_info : dict + Stream configuration containing channel selection and other metadata + + Returns + ------- + None + """ + # Store configuration but DO NOT open files here + self._filename = filename + self._tw_handler = tw_handler + self._stream_info = stream_info + + # Initialize data-dependent attributes to None + self.ds: xr.Dataset | None = None + self.len = 0 + self.source_channels = [] + self.source_idx = [] + self.target_channels = [] + self.target_idx = [] + self.geoinfo_channels = [] + self.geoinfo_idx = [] + self.properties = {} + + # Grid properties + self.latitudes: NDArray | None = None + self.longitudes: NDArray | None = None + self.n_lat: int = 0 + self.n_lon: int = 0 + self.n_points: int = 0 + + # Statistics + self.mean: NDArray | None = None + self.stdev: NDArray | None = None + + # Call super() with temporary values + super().__init__(self._tw_handler, self._stream_info) + + # Flag to ensure initialization happens only once per worker + self._initialized = False + + def _lazy_init(self) -> None: + """ + Initialize the dataset. Called once per worker process to ensure + proper handling of file handles across processes. + """ + if self._initialized: + return + + try: + # Open the Zarr dataset with xarray + self.ds = xr.open_zarr(self._filename, consolidated=True, chunks=None, zarr_format=2) + except Exception as e: + name = self._stream_info["name"] + _logger.error(f"Failed to open {name} at {self._filename}: {e}") + self.init_empty() + self._initialized = True + return + + # Extract time coordinate + time_coord = self.ds.coords["time"].values + data_start_time = np.datetime64(time_coord[0]) + data_end_time = np.datetime64(time_coord[-1]) + + # Check if dataset overlaps with requested time window + if self._tw_handler.t_start >= data_end_time or self._tw_handler.t_end <= data_start_time: + name = self._stream_info["name"] + _logger.warning(f"{name} is not supported over data loader window. 
Stream is skipped.") + self.init_empty() + self._initialized = True + return + + # Determine the period/frequency + if len(time_coord) > 1: + period = np.timedelta64(time_coord[1] - time_coord[0]) + else: + # Default to daily if only one timestep + period = np.timedelta64(1, "D") + + # Handle frequency override from stream_info + if "frequency" in self._stream_info: + period = str_to_timedelta(self._stream_info["frequency"]) + + # Re-initialize parent class with correct time info + super().__init__( + self._tw_handler, + self._stream_info, + data_start_time, + data_end_time, + period, + ) + + # Calculate valid time range indices + time_mask = (time_coord >= self._tw_handler.t_start) & (time_coord < self._tw_handler.t_end) + self.len = int(np.sum(time_mask)) + + if self.len <= 0: + self.init_empty() + self._initialized = True + return + + # Extract and validate spatial coordinates + self.latitudes = self.ds.coords["latitude"].values.astype(np.float32) + self.longitudes = self.ds.coords["longitude"].values.astype(np.float32) + + # Validate coordinate ranges + if np.any(self.latitudes < -90) or np.any(self.latitudes > 90): + _logger.warning( + f"Latitude values outside valid range [-90, 90] in stream " + f"'{self._stream_info['name']}'" + ) + self.latitudes = np.clip(self.latitudes, -90.0, 90.0) + + if np.any(self.longitudes < -180) or np.any(self.longitudes > 180): + _logger.warning( + f"Longitude values outside valid range [-180, 180] in stream " + f"'{self._stream_info['name']}'. Converting from [0, 360] format." + ) + self.longitudes = ((self.longitudes + 180.0) % 360.0 - 180.0).astype(np.float32) + + self.n_lat = len(self.latitudes) + self.n_lon = len(self.longitudes) + self.n_points = self.n_lat * self.n_lon + + # Identify available data variables (exclude coordinate and statistics variables) + available_vars = [ + var + for var in self.ds.data_vars + if not var.endswith("_mean") + and not var.endswith("_std") + and "time" in self.ds[var].dims + ] + + # Select source channels + source_channels_filter = self._stream_info.get("source") + source_exclude = self._stream_info.get("source_exclude", []) + self.source_channels, self.source_idx = self._select_channels( + available_vars, source_channels_filter, source_exclude + ) + + # Select target channels + target_channels_filter = self._stream_info.get("target") + target_exclude = self._stream_info.get("target_exclude", []) + self.target_channels, self.target_idx = self._select_channels( + available_vars, target_channels_filter, target_exclude + ) + + # No geoinfo channels for gridded data + self.geoinfo_channels = [] + self.geoinfo_idx = [] + + # Get target channel weights + self.target_channel_weights = self.parse_target_channel_weights() + + # Load or compute statistics + all_channels = sorted(set(self.source_channels + self.target_channels)) + self._load_statistics(all_channels) + + # Log configuration + ds_name = self._stream_info["name"] + _logger.info(f"{ds_name}: source channels: {self.source_channels}") + _logger.info(f"{ds_name}: target channels: {self.target_channels}") + _logger.info(f"{ds_name}: grid shape: {self.n_lat} x {self.n_lon}") + + self.properties = { + "stream_id": self._stream_info.get("id", 0), + } + + self._initialized = True + + def _select_channels( + self, + available_vars: list[str], + include_filters: list[str] | None, + exclude_filters: list[str] | None = None, + ) -> tuple[list[str], list[int]]: + """ + Select channels based on include/exclude filters. 
+ + Parameters + ---------- + available_vars : list[str] + List of available variable names + include_filters : list[str] | None + List of patterns to include (None means include all) + exclude_filters : list[str] | None + List of patterns to exclude + + Returns + ------- + tuple[list[str], list[int]] + Selected channel names and their indices + """ + if exclude_filters is None: + exclude_filters = [] + + selected = [] + for var in available_vars: + # Check inclusion + if include_filters is not None: + if not any(f in var or f == var for f in include_filters): + continue + + # Check exclusion + if any(f in var for f in exclude_filters): + continue + + selected.append(var) + + # Return channels and their indices in the original list + indices = [available_vars.index(ch) for ch in selected] + return selected, indices + + def _load_statistics(self, channels: list[str]) -> None: + """ + Load or compute statistics (mean and standard deviation) for channels. + + Parameters + ---------- + channels : list[str] + List of channel names for which to load statistics + """ + means = [] + stds = [] + + for ch in channels: + # Try to load pre-computed statistics + mean_var = f"{ch}_mean" + std_var = f"{ch}_std" + + if mean_var in self.ds.data_vars: + mean = float(self.ds[mean_var].values) + else: + _logger.warning( + f"No pre-computed mean for {ch}, using 0.0. " + "Consider computing statistics offline." + ) + mean = 0.0 + + if std_var in self.ds.data_vars: + std = float(self.ds[std_var].values) + else: + _logger.warning( + f"No pre-computed std for {ch}, using 1.0. " + "Consider computing statistics offline." + ) + std = 1.0 + + means.append(mean) + stds.append(std) + + self.mean = np.array(means, dtype=np.float32) + self.stdev = np.array(stds, dtype=np.float32) + + # Avoid division by zero + self.stdev[self.stdev <= 1e-5] = 1.0 + + @override + def init_empty(self) -> None: + """Initialize an empty reader.""" + super().init_empty() + self.ds = None + self.len = 0 + self.n_points = 0 + + @override + def length(self) -> int: + """Return the length of the dataset.""" + self._lazy_init() + return self.len + + @override + def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: + """ + Get data for a time window. 
+ + Parameters + ---------- + idx : TIndex + Index of temporal window + channels_idx : list[int] + Selection of channel indices + + Returns + ------- + ReaderData + Data structure containing coords, geoinfos, data, and datetimes + """ + self._lazy_init() + + (t_idxs, dtr) = self._get_dataset_idxs(idx) + + if self.ds is None or self.len == 0 or len(t_idxs) == 0: + return ReaderData.empty( + num_data_fields=len(channels_idx), + num_geo_fields=len(self.geoinfo_idx), + ) + + # Get the actual channel names + all_channels = sorted(set(self.source_channels + self.target_channels)) + selected_channels = [all_channels[i] for i in channels_idx] + + # Extract data for selected timesteps and channels + data_arrays = [] + datetimes_list = [] + + for t_idx in t_idxs: + if t_idx < 0 or t_idx >= len(self.ds.coords["time"]): + continue + + # Extract data for this timestep + timestep_data = [] + for ch in selected_channels: + # Load data using isel for efficient indexing + var_data = self.ds[ch].isel(time=t_idx).values.astype(np.float32) + # Flatten spatial dimensions (lat, lon) -> (n_points,) + var_data_flat = var_data.flatten() + timestep_data.append(var_data_flat) + + # Stack channels: (n_points, n_channels) + timestep_data = np.stack(timestep_data, axis=1) + data_arrays.append(timestep_data) + + # Get datetime for this timestep + dt = np.datetime64(self.ds.coords["time"].values[t_idx]) + datetimes_list.extend([dt] * self.n_points) + + if len(data_arrays) == 0: + return ReaderData.empty( + num_data_fields=len(channels_idx), + num_geo_fields=len(self.geoinfo_idx), + ) + + # Concatenate all timesteps: (n_timesteps * n_points, n_channels) + data = np.vstack(data_arrays) + + # Create coordinate grid + lon_grid, lat_grid = np.meshgrid(self.longitudes, self.latitudes) + coords_single = np.stack([lat_grid.flatten(), lon_grid.flatten()], axis=1).astype( + np.float32 + ) + + # Repeat coordinates for each timestep + coords = np.tile(coords_single, (len(t_idxs), 1)) + + # Empty geoinfos + geoinfos = np.zeros((len(data), 0), dtype=np.float32) + + # Convert datetimes to numpy array + datetimes = np.array(datetimes_list, dtype="datetime64[ns]") + + rd = ReaderData( + coords=coords, + geoinfos=geoinfos, + data=data, + datetimes=datetimes, + ) + + check_reader_data(rd, dtr) + + return rd diff --git a/packages/readers_extra/src/weathergen/readers_extra/data_reader_icon.py b/packages/readers_extra/src/weathergen/readers_extra/data_reader_icon.py new file mode 100644 index 000000000..78a103ff6 --- /dev/null +++ b/packages/readers_extra/src/weathergen/readers_extra/data_reader_icon.py @@ -0,0 +1,530 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
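# For reference, the grid-to-point flattening used by DataReaderEObs._get
# above, as a standalone sketch with synthetic sizes: a (time, lat, lon) cube
# becomes (time * n_points) rows with matching lat/lon coordinates per row.
import numpy as np

n_t, n_lat, n_lon = 2, 3, 4
lats = np.linspace(40.0, 50.0, n_lat, dtype=np.float32)
lons = np.linspace(-5.0, 5.0, n_lon, dtype=np.float32)

lon_grid, lat_grid = np.meshgrid(lons, lats)
coords_single = np.stack([lat_grid.flatten(), lon_grid.flatten()], axis=1)
coords = np.tile(coords_single, (n_t, 1))  # repeat the mesh once per time step
assert coords.shape == (n_t * n_lat * n_lon, 2)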
+ +import json +import logging +from pathlib import Path +from typing import override + +import fsspec +import numpy as np +import xarray as xr +import zarr +from numpy.typing import NDArray + +from weathergen.datasets.data_reader_anemoi import _clip_lat, _clip_lon +from weathergen.datasets.data_reader_base import ( + DataReaderTimestep, + ReaderData, + TimeWindowHandler, + TIndex, + check_reader_data, +) + +_logger = logging.getLogger(__name__) + +frequencies = { + "3hrPt": np.timedelta64(10800000000000, "ns"), + "day": np.timedelta64(86400000000000, "ns"), + "fx": np.timedelta64(0, "ns"), + "mon": np.timedelta64(2548800000000000, "ns"), + "monC": np.timedelta64(2505600000000000, "ns"), + "yr": np.timedelta64(31536000000000000, "ns"), +} + + +class DataReaderIconBase(DataReaderTimestep): + "Wrapper for ICON data variables" + + def __init__( + self, + tw_handler: TimeWindowHandler, + stream_info: dict, + ) -> None: + """ + Parent class for ICON data variables + + Parameters + ---------- + tw_handler : TimeWindowHandler + Handles temporal slicing and mapping from time indices to datetimes + stream_info : dict + Stream metadata + """ + + # Extract key metadata from stream_info + lon_attribute = stream_info["attributes"]["lon"] + lat_attribute = stream_info["attributes"]["lat"] + mesh_attribute = stream_info["attributes"]["grid"] + + # Set mesh size based on spatial grid definition + self.mesh_size = len(self.ds[mesh_attribute]) + + # Time range in the dataset + self.time = self.ds["time"].values + start_ds = np.datetime64(self.time[0]) + end_ds = np.datetime64(self.time[-1]) + + # Skip stream if it doesn't intersect with time window + if start_ds > tw_handler.t_end or end_ds < tw_handler.t_start: + name = stream_info["name"] + _logger.warning(f"{name} is not supported over data loader window. 
Stream is skipped.") + super().__init__(tw_handler, stream_info) + self.init_empty() + return + + # Compute temporal resolution if not already defined + self.temporal_frequency = ( + self.time[1] - self.time[0] + if self.temporal_frequency is None + else self.temporal_frequency + ) + + # Initialize parent class with resolved time window + super().__init__( + tw_handler, + stream_info, + start_ds, + end_ds, + self.temporal_frequency, + ) + + # Compute absolute start/end indices in the dataset based on time window + self.start_idx = (tw_handler.t_start - start_ds).astype("timedelta64[D]").astype( + int + ) * self.mesh_size + self.end_idx = ( + (tw_handler.t_end - start_ds).astype("timedelta64[D]").astype(int) + 1 + ) * self.mesh_size - 1 + + # Sanity check + assert self.end_idx > self.start_idx, ( + f"Abort: Final index of {self.end_idx} is the same or smaller than " + f"start index {self.start_idx}" + ) + + # Number of time steps in selected range + self.len = int((self.end_idx - self.start_idx) // self.mesh_size) + + # === Coordinates === + + # Convert to degrees if stored in radians + coords_units = self.ds[lat_attribute].attrs["units"] + if coords_units == "radian": + self.lat = np.rad2deg(self.ds[lat_attribute][:].astype("f")) + self.lon = np.rad2deg(self.ds[lon_attribute][:].astype("f")) + else: + self.lat = self.ds[lat_attribute][:].astype("f") + self.lon = self.ds[lon_attribute][:].astype("f") + + # Extract coordinates and pressure level + self.lat = _clip_lat(self.lat) + self.lon = _clip_lon(self.lon) + + # Placeholder; currently unused + self.step_hrs = 1 + + # Stream metadata + self.properties = { + "stream_id": 0, + } + + # === Normalization statistics === + + # Ensure stats match dataset columns + assert self.stats_vars == self.colnames, ( + f"Variables in normalization file {self.stats_vars} do not match " + f"dataset columns {self.colnames}" + ) + + # === Channel selection === + source_channels = stream_info.get("source") + if source_channels: + self.source_channels, self.source_idx = self.select(source_channels) + elif getattr(self, "levels", None): + self.source_channels, self.source_idx = self.select_by_level("source") + else: + self.source_channels = self.colnames + self.source_idx = self.cols_idx + + target_channels = stream_info.get("target") + if target_channels: + self.target_channels, self.target_idx = self.select(target_channels) + elif getattr(self, "levels", None): + self.target_channels, self.target_idx = self.select_by_level("target") + else: + self.target_channels = self.colnames + self.target_idx = self.cols_idx + + # Ensure all selected channels have valid standard deviations + selected_channel_indices = list(set(self.source_idx).union(set(self.target_idx))) + non_positive_stds = np.where(self.stdev[selected_channel_indices] <= 0)[0] + if len(non_positive_stds) != 0: + bad_vars = [self.colnames[selected_channel_indices[i]] for i in non_positive_stds] + raise ValueError( + f"Abort: Encountered non-positive standard deviations" + f" for selected columns {bad_vars}." + ) + + # === Geo-info channels (currently unused) === + self.geoinfo_channels = [] + self.geoinfo_idx = [] + + def select(self, ch_filters: list[str]) -> (NDArray, list[str]): + """ + Allow user to specify which columns they want to access. + Get functions only returned for these specified columns. 
+ + Parameters + ---------- + ch_filters: list[str] + list of patterns to access + + Returns + ------- + selected_colnames: np.array, + Selected columns according to the patterns specified in ch_filters + selected_cols_idx + respective index of these patterns in the data array + """ + mask = [np.array([f in c for f in ch_filters]).any() for c in self.colnames] + + selected_cols_idx = self.cols_idx[np.where(mask)[0]] + selected_colnames = [self.colnames[int(i)] for i in np.where(mask)[0]] + + return selected_colnames, selected_cols_idx + + def select_by_level(self, ch_type: str) -> tuple[list[str], NDArray[np.int64]]: + """ + Select channels constrained by allowed pressure levels and optional excludes. + ch_type: "source" or "target" (for *_exclude key in stream_info) + """ + channels_exclude = self.stream_info.get(f"{ch_type}_exclude", []) + allowed_levels = set(self.levels) if getattr(self, "levels", None) else set() + + new_colnames: list[str] = [] + for ch in self.colnames: + parts = ch.split("_") + # Profile channel if exactly one level suffix exists + if len(parts) == 2 and parts != "": + level = parts[1] + ch_base = parts[0] + if ( + not allowed_levels or level in allowed_levels + ) and ch_base not in channels_exclude: + new_colnames.append(ch) + else: + if ch not in channels_exclude: + new_colnames.append(ch) + + mask = [c in new_colnames for c in self.colnames] + selected_cols_idx = self.cols_idx[np.where(mask)] + selected_colnames = [self.colnames[int(i)] for i in np.where(mask)[0]] + + return selected_colnames, selected_cols_idx + + @override + def init_empty(self) -> None: + super().init_empty() + self.len = 0 + + @override + def length(self) -> int: + """ + Length of dataset + + Parameters + ---------- + None + + Returns + ------- + length of dataset + """ + return self.len + + +########################## +class DataReaderIcon(DataReaderIconBase): + "Wrapper for ICON variables - This class reads Zarr format datasets" + + def __init__( + self, + tw_handler: TimeWindowHandler, + filename: Path, + stream_info: dict, + ) -> None: + # Open Zarr dataset with Xarray + self.ds = xr.open_zarr(filename, consolidated=True) + + # Column (variable) names and indices + self.colnames = list(self.ds) + self.cols_idx = np.array(list(np.arange(len(self.colnames)))) + + # get pressure levels + # TODO Julius ? + self.levels = [] + + # Will be inferred later based on the dataset’s time variable + self.temporal_frequency = None + + # Load associated statistics file for normalization + stats_filename = Path(filename).with_name(Path(filename).stem + "_stats.json") + with open(stats_filename) as stats_file: + self.stats = json.load(stats_file) + + # Extract variable list from stats metadata + stats_vars_metadata = self.stats["metadata"]["variables"] + self.stats_vars = [v for v in stats_vars_metadata if v not in {"clat", "clon", "time"}] + + # Load mean and standard deviation per variable + self.mean = np.array(self.stats["statistics"]["mean"], dtype="d") + self.stdev = np.array(self.stats["statistics"]["std"], dtype="d") + + # Delegate further initialization to the base class + super().__init__( + tw_handler, + stream_info, + ) + + # TODO Julius ? 
+    def select_by_level(self):
+        return
+
+    @override
+    def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData:
+        """
+        Get data for temporal window
+        Parameters
+        ----------
+        idx : int
+            Index of temporal window
+        channels_idx : list[int]
+            Selection of channels
+        Returns
+        -------
+        data (coords, geoinfos, data, datetimes)
+        """
+
+        (t_idxs, dtr) = self._get_dataset_idxs(idx)
+
+        if self.ds is None or self.len == 0 or len(t_idxs) == 0:
+            return ReaderData.empty(
+                num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx)
+            )
+
+        # TODO: handle sub-sampling
+
+        t_idxs_start = t_idxs[0]
+        t_idxs_end = t_idxs[-1] + 1
+
+        # datetimes
+        datetimes = np.asarray(self.time[t_idxs_start:t_idxs_end])
+
+        # lat/lon coordinates, tiled once per time step (time-major, matching the
+        # row layout of the data and datetimes below)
+        lat = self.lat.values[:, np.newaxis]
+        lon = self.lon.values[:, np.newaxis]
+
+        lat = np.tile(lat, (len(datetimes), 1))
+        lon = np.tile(lon, (len(datetimes), 1))
+
+        coords = np.concatenate([lat, lon], axis=1)
+
+        # time coordinate repeated to match grid points
+        datetimes = np.repeat(datetimes, self.mesh_size).reshape(-1, 1)
+        datetimes = np.squeeze(datetimes)
+
+        # expanding indexes for data
+        start_row = t_idxs_start * self.mesh_size
+        end_row = t_idxs_end * self.mesh_size
+
+        # data
+        channels = np.array(self.colnames)[channels_idx]
+
+        data_reshaped = [
+            np.asarray(self.ds[ch_]).reshape(-1, 1)[start_row:end_row] for ch_ in channels
+        ]
+        data = np.concatenate(data_reshaped, axis=1)
+
+        # empty geoinfos
+        geoinfos = np.zeros((data.shape[0], 0), dtype=data.dtype)
+
+        rd = ReaderData(
+            coords=coords,
+            geoinfos=geoinfos,
+            data=data,
+            datetimes=datetimes,
+        )
+        check_reader_data(rd, dtr)
+
+        return rd
+
+
+##########################
+class DataReaderIconCmip6(DataReaderIconBase):
+    "Wrapper for ICON CMIP6 data variables - This class reads NetCDF4 using kerchunk"
+
+    def __init__(
+        self,
+        tw_handler: TimeWindowHandler,
+        filename: Path,
+        stream_info: dict,
+    ) -> None:
+        # Open the kerchunk-generated reference JSON
+        ref_path = Path(filename)
+        if not ref_path.exists():
+            raise FileNotFoundError(f"Kerchunk reference JSON not found: {ref_path}")
+
+        # Load JSON references and initialize a virtual file system
+        kerchunk_ref = json.loads(ref_path.read_text())
+        fs = fsspec.filesystem("reference", fo=kerchunk_ref)
+        mapper = fs.get_mapper("")
+
+        # Ensure metadata is consolidated for zarr-style access
+        zarr.consolidate_metadata(mapper)
+
+        # Open the dataset using Xarray with Zarr engine
+        self.ds = xr.open_dataset(mapper, engine="zarr", consolidated=True, chunks={"time": 1})
+
+        # get pressure levels
+        # TODO add self.dataset_levels
+        self.levels = stream_info["pressure_levels"]
+
+        # Column (variable) names and indices
+        self.colnames, self.cols_idx = self.get_cols(stream_info["variables"])
+
+        # Determine temporal frequency from dataset metadata
+        frequency_attr = self.ds.attrs["frequency"]
+        self.temporal_frequency = frequencies[frequency_attr]
+
+        # Load associated statistics file for normalization
+        stats_filename = Path(filename).with_name(Path(filename).stem + "_stats.json")
+        with open(stats_filename) as stats_file:
+            self.stats = json.load(stats_file)
+
+        # Variables included in the stats
+        self.stats_vars = list(self.stats)
+
+        # Load mean and standard deviation per variable
+        self.mean = np.array([self.stats[var]["mean"] for var in self.stats_vars], dtype=np.float64)
+        self.stdev = np.array([self.stats[var]["std"] for var in self.stats_vars], dtype=np.float64)
+
+        # Delegate further initialization to the base class
+        
super().__init__( + tw_handler, + stream_info, + ) + + def get_cols(self, channels: list[str]) -> (list[str], list[int]): + """ + TBD + """ + colnames = [] + for ch in channels: + coords_list = list(self.ds[ch].coords) + if "plev" not in coords_list: + colnames.append(f"{ch}") + else: + dataset_levels = self.ds[ch]["plev"][0, :].values + for level in dataset_levels: + colnames.append(f"{ch}_{int(level)}") + + cols_idx = np.array(list(np.arange(len(colnames)))) + + return colnames, cols_idx + + @override + def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: + """ + Get data for temporal window + + Parameters + ---------- + idx : int + Index of temporal window + channels_idx : list[int] + Selection of channels + + Returns + ------- + ReaderData + """ + (t_idxs, dtr) = self._get_dataset_idxs(idx) + # dtr is a time window object it has the attributes t_start_win and t_end_win + + if self.ds is None or self.len == 0 or len(t_idxs) == 0: + return ReaderData.empty( + num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) + ) + + # Select channels + channels = np.array(self.colnames)[channels_idx] + + start_ts = dtr.start + end_ts = dtr.end - np.timedelta64(1, "h") + + try: + data_per_channel = [] + datetimes = [] + coords = [] + + for ch in channels: + ch_parts = ch.split("_") + if ( + hasattr(self, "levels") + and self.levels + and len(ch_parts) == 2 + and ch_parts[1] in self.levels + ): + ch_ = ch_parts[0] + plev_int = ch_parts[1] + levels_all = self.ds[ch_]["plev"][0].values + da = self.ds[ch_].assign_coords(plev=("plev", levels_all)) + da = da.sel(plev=plev_int, time=slice(start_ts, end_ts)) + else: + da = self.ds[ch].sel(time=slice(start_ts, end_ts)) + data_arr = da.compute(scheduler="synchronous") + + if not data_per_channel: + # datetimes + datetimes = np.repeat(data_arr.time.values, self.mesh_size).reshape(-1, 1) + datetimes = np.squeeze(datetimes) + + # coords + n_times = len(data_arr.time) + lat = np.tile(data_arr.latitude.values[:, np.newaxis], (n_times, 1)) + lon = np.tile(data_arr.longitude.values[:, np.newaxis], (n_times, 1)) + + coords = np.concatenate([lat, lon], axis=1) + + # data + data_per_channel.append(np.asarray(data_arr.data.reshape(-1, 1))) + + data = np.concatenate(data_per_channel, axis=1) + except Exception as e: + _logger.debug(f"Date not present in ICON dataset: {str(e)}. Skipping.") + return ReaderData.empty( + num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) + ) + if data_per_channel[0].shape[0] == 0: + return ReaderData.empty( + num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) + ) + + # Empty geoinfos + geoinfos = np.zeros((data.shape[0], 0), dtype=data.dtype) + + rd = ReaderData( + coords=coords, + geoinfos=geoinfos, + data=data, + datetimes=datetimes, + ) + check_reader_data(rd, dtr) + return rd diff --git a/packages/readers_extra/src/weathergen/readers_extra/registry.py b/packages/readers_extra/src/weathergen/readers_extra/registry.py new file mode 100644 index 000000000..8920354b4 --- /dev/null +++ b/packages/readers_extra/src/weathergen/readers_extra/registry.py @@ -0,0 +1,28 @@ +from collections.abc import Callable +from dataclasses import dataclass + +from weathergen.common.config import Config + + +@dataclass +class ReaderEntry: + data_path: str | None + constructor: Callable + + +def get_extra_reader(name: str, cf: Config) -> object | None: + """Get an extra reader by name.""" + # Uses lazy imports to avoid circular dependencies and to not load all the readers at start. 
+ # There is no sanity check on them, so they may fail at runtime during imports + + match name: + case "icon": + from weathergen.readers_extra.data_reader_icon import DataReaderIcon + + return ReaderEntry(cf.data_path_icon, DataReaderIcon) + case "eobs": + from weathergen.readers_extra.data_reader_eobs import DataReaderEObs + + return ReaderEntry(cf.data_path_eobs, DataReaderEObs) + case _: + return None diff --git a/pyproject.toml b/pyproject.toml index 40ea9e0c8..0f0f7a296 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -7,17 +7,30 @@ authors = [ { name = "WeatherGenerator collaboration" } ] -requires-python = ">=3.11,<3.13" -dependencies = [ 'torch', - 'flash_attn', - 'numpy', - 'astropy_healpix', - 'zarr', - 'anemoi-datasets', - 'pandas', - 'pynvml', - 'tqdm', - 'matplotlib'] +requires-python = ">=3.12,<3.13" +# TODO: split the plotting dependencies into their own dep groups, they are not required. +dependencies = [ + 'numpy~=2.2', + 'astropy_healpix~=1.1.2', + 'zarr~=2.17', + 'anemoi-datasets~=0.5.16', + 'pandas~=2.2', + 'pynvml', + 'tqdm', + 'matplotlib', + 'packaging', + 'wheel', + 'psutil', + "polars~=1.25.2", + "omegaconf~=2.3.0", + "dask~=2025.5.1", + "hatchling", + "numexpr>=2.11.0", + "weathergen-common", + "weathergen-evaluate", + "weathergen-readers-extra", +] + [project.urls] Homepage = "https://www.weathergenerator.eu" @@ -26,9 +39,218 @@ Repository = "https://github.com/ecmwf/WeatherGenerator.git" Issues = "https://github.com/ecmwf/WeatherGenerator/issues" [project.scripts] -train = "weathergen:train" -evaluate = "weathergen:evaluate" +train = "weathergen.run_train:train" +train_continue = "weathergen.run_train:train_continue" +inference = "weathergen.run_train:inference" +evaluate = "weathergen.evaluate.run_evaluation:evaluate" +plot_train = "weathergen.utils.plot_training:plot_train" +export = "weathergen.evaluate.export.export_inference:export" [build-system] requires = ["hatchling"] build-backend = "hatchling.build" + +[tool.hatch.build.targets.wheel] +packages = ["src/weathergen"] + +[dependency-groups] +# The development dependencies +dev = [ + "ipykernel>=6.30.0", + "jupytext>=1.17.2", + "pytest~=8.3.5", + "pytest-mock>=3.14.1", + "ruff==0.9.7", + "tensorboard>=2.20.0", + "pdbpp>=0.11.7", + "pyrefly==0.36.0", +] + + +# Torch listed as optional dependencies. +# uv and python can only filter dependencies by platform, not by capability. +# Following the recommendations from https://docs.astral.sh/uv/guides/integration/pytorch +# We need to support: +# x86_64: cpu (unit tests) + gpu +# aarch64: gpu +[project.optional-dependencies] + +cpu = [ + 'torch==2.6.0', +] + +gpu = [ + 'torch==2.6.0+cu126', + # flash-attn also has a torch dependency. + "flash-attn", +] + + +[tool.black] + +# Wide rows +line-length = 100 + + +# The linting configuration +[tool.ruff] + +# Wide rows +line-length = 100 + +[tool.ruff.lint] +# All disabled until the code is formatted. +select = [ + # pycodestyle + "E", + # Pyflakes + "F", + # pyupgrade + "UP", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + # isort + "I", + # Banned imports + "TID", + # Naming conventions + "N", + # print + "T201" +] + +# These rules are sensible and should be enabled at a later stage. 
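+# A hedged sketch of how a rule could be phased in (illustrative only, not
+# active configuration; the path is hypothetical): remove it from the list
+# below and keep it suppressed only where violations remain, e.g.
+#
+#   [tool.ruff.lint.per-file-ignores]
+#   "src/weathergen/model/*" = ["SIM117"]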
+ignore = [
+    # "B006",
+    "B011",
+    "UP008",
+    "SIM117",
+    "SIM118",
+    "SIM102",
+    "SIM401",
+    # To ignore, not relevant for us
+    "SIM108", # in case additional norm layer support is added in future
+    "N817", # we use heavy acronyms, e.g., allowing 'import LongModuleName as LMN' (LMN is accepted)
+    "E731", # overly restrictive and leads to less readable code
+    "N812", # prevents us following the convention for importing torch.nn.functional as F
+]
+
+[tool.ruff.lint.flake8-tidy-imports.banned-api]
+"numpy.ndarray".msg = "Do not use 'ndarray' to describe a numpy array type. Use numpy.typing.NDArray or numpy.typing.NDArray[np.float32] for example"
+
+[tool.ruff.format]
+# Use Unix `\n` line endings for all files
+line-ending = "lf"
+
+
+
+
+[tool.uv]
+# Most work is done on a distributed filesystem, where hardlink is not always possible.
+# Also, trying to resolve some permissions issue, see 44.
+link-mode = "symlink"
+# This guarantees that the build is deterministic and will not be impacted
+# by future releases of dependencies or sub-dependencies.
+# See https://docs.astral.sh/uv/reference/settings/#exclude-newer
+# TODO: pytorch does not publish valid release timestamps, so sadly it does not work.
+# exclude-newer = "2025-03-14T00:00:00Z"
+
+# The minimum version of uv required.
+# It is tightly controlled because the format of uv.lock has changed
+# over revisions, causing reformats to happen without reason.
+# Also, relatively recent versions are required to support workspaces.
+required-version = ">=0.7.0"
+
+# The supported environments
+# TODO: add macos and windows (CPU only, for running tests)
+environments = [
+    "sys_platform == 'linux' and platform_machine == 'aarch64'",
+    "sys_platform == 'linux' and platform_machine == 'x86_64'",
+#    "sys_platform == 'darwin'",
+]
+
+# One can only have cpu or gpu.
+conflicts = [
+    [
+        { extra = "cpu" },
+        { extra = "gpu" },
+    ],
+]
+
+
+[[tool.uv.index]]
+name = "pytorch-cu126"
+url = "https://download.pytorch.org/whl/cu126"
+explicit = true
+
+
+[tool.pyrefly]
+project-includes = ["src/"]
+project-excludes = [
+]
+
+[tool.pyrefly.errors]
+bad-argument-type = false
+unsupported-operation = false
+missing-attribute = false
+no-matching-overload = false
+bad-context-manager = false
+
+# To do:
+bad-assignment = false
+bad-return = false
+index-error = false
+not-iterable = false
+not-callable = false
+
+
+
+[[tool.uv.index]]
+name = "pytorch-cpu"
+url = "https://download.pytorch.org/whl/cpu"
+explicit = true
+
+[tool.uv.sources]
+weathergen-common = { workspace = true }
+weathergen-evaluate = { workspace = true }
+weathergen-metrics = { workspace = true }
+weathergen-readers-extra = { workspace = true }
+
+
+flash-attn = [
+# The build of Cathal O'Brien is not compatible with the libc build on santis.
+# Hardcode the reference to the swiss cluster for the time being.
+# TODO: open issue
+# { url = "https://github.com/cathalobrien/get-flash-attn/releases/download/v0.1-alpha/flash_attn-2.7.4+cu12torch2.6cxx11abiFALSE-cp312-cp312-linux_aarch64.whl", marker = "sys_platform == 'linux' and platform_machine == 'aarch64'" },
+# This version was rebuilt locally on santis and uploaded.
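+# (Explanatory note, added: each entry below pairs a wheel URL with a PEP 508
+#  environment marker so uv selects the matching build per platform, i.e. the
+#  pattern is { url = "<wheel url>", marker = "sys_platform == '...' and platform_machine == '...'" }.)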
+ { url = "https://object-store.os-api.cci1.ecmwf.int/weathergenerator-dev/wheels/flash_attn-2.7.3-cp312-cp312-linux_aarch64.whl", marker = "sys_platform == 'linux' and platform_machine == 'aarch64'" }, + { url = "https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiTRUE-cp312-cp312-linux_x86_64.whl", marker = "sys_platform == 'linux' and platform_machine == 'x86_64'" }, +# { index = "pytorch-cpu", marker = "sys_platform == 'darwin'"}, +] + + +torch = [ +# Explicit pin for GPU + { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-linux_aarch64.whl", marker = 'sys_platform == "linux" and platform_machine == "aarch64"', extra="gpu" }, + { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-manylinux_2_28_x86_64.whl", marker = 'sys_platform == "linux" and platform_machine == "x86_64"', extra="gpu" }, +# Use the public repo for CPU versions. + { index = "pytorch-cpu", marker = "sys_platform == 'linux'", extra="cpu"}, +] + +[tool.pytest.ini_options] +log_cli = true +log_cli_level = "INFO" +log_cli_format = "%(asctime)s [%(levelname)8s] %(message)s (%(filename)s:%(lineno)s)" +log_cli_date_format = "%Y-%m-%d %H:%M:%S" + + +[tool.uv.workspace] +members = [ + "packages/common", + "packages/evaluate", + "packages/metrics", + "packages/readers_extra", +] + diff --git a/scripts/actions.sh b/scripts/actions.sh new file mode 100755 index 000000000..c19d20f4b --- /dev/null +++ b/scripts/actions.sh @@ -0,0 +1,152 @@ +#!/bin/bash + +# TODO: this is the root weathergenerator directory, rename the variable. +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && cd .. && pwd)" + +case "$1" in + sync) + ( + cd "$SCRIPT_DIR" || exit 1 + uv sync --all-packages --extra gpu + ) + ;; + lint) + ( + cd "$SCRIPT_DIR" || exit 1 + uv run --no-project --with "ruff==0.12.2" ruff format --target-version py312 \ + src/ scripts/ packages/ \ + && \ + uv run --no-project --with "ruff==0.12.2" \ + ruff check --target-version py312 \ + --fix \ + src/ scripts/ packages/ + ) + ;; + lint-check) + ( + cd "$SCRIPT_DIR" || exit 1 + uv run --no-project --with "ruff==0.12.2" ruff format --target-version py312 \ + -n \ + src/ scripts/ packages/ \ + && \ + uv run --no-project --with "ruff==0.12.2" \ + ruff check --target-version py312 \ + src/ scripts/ packages/ + ) + ;; + type-check) + ( + # The dependencies are rebuilt for each package to ensure that they do not rely on implicit imports. + cd "$SCRIPT_DIR" || exit 1 + + # weathergen-common + uv sync --project packages/common --no-install-workspace + uv pip list + uv run --project packages/common --frozen pyrefly check packages/common + # Fail for errors on weathergen-common: + if [ $? -ne 0 ]; then + echo "Type checking failed for weathergen-common." + exit 1 + fi + + # weathergen-metrics + uv sync --project packages/metrics --no-install-workspace + uv pip list + uv run --project packages/metrics --frozen pyrefly check packages/metrics + # Fail for errors on weathergen-metrics: + if [ $? -ne 0 ]; then + echo "Type checking failed for weathergen-metrics." + exit 1 + fi + + # weathergen-evaluate + uv sync --project packages/evaluate --no-install-workspace --package weathergen-evaluate + uv pip list + uv run --project packages/evaluate --frozen pyrefly check packages/evaluate + + # weathergen (root) + # Install the whole workspace. It also needs the extra cpu option for the right version of pytorch. 
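+    # Hedged usage note (illustrative): this branch is reached from the repo
+    # root via `./scripts/actions.sh type-check`; it assumes `uv` is installed
+    # and the workspace pyproject files are in place.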
+    uv sync --all-packages --extra cpu --no-install-workspace
+    uv pip list
+    uv run --all-packages pyrefly check src
+    echo "Type checking completed."
+    )
+    ;;
+  unit-test)
+    (
+    cd "$SCRIPT_DIR" || exit 1
+    uv sync --extra cpu
+    uv run --extra cpu pytest src/
+    )
+    ;;
+  toml-check)
+    (
+    cd "$SCRIPT_DIR" || exit 1
+    uv run --no-project python scripts/check_tomls.py
+    )
+    ;;
+  integration-test)
+    (
+    cd "$SCRIPT_DIR" || exit 1
+    uv sync --offline --all-packages --extra gpu
+    uv run --offline pytest ./integration_tests/small1_test.py --verbose -s
+    )
+    ;;
+  create-links)
+    (
+    cd "$SCRIPT_DIR" || exit 1
+    # This script creates symbolic links to the shared working directories.
+    # 1. Get the path of the private config of the cluster
+    # 2. Read the YAML and extract the path of the shared conf
+    # This uses the yq command. It is a Python package, so uvx (bundled with uv) will download it and create the right venv
+    export working_dir=$(cat $(../WeatherGenerator-private/hpc/platform-env.py hpc-config) | uvx yq .path_shared_working_dir)
+    # Remove quotes
+    export working_dir=$(echo "$working_dir" | sed 's/[\"\x27]//g')
+    # If the working directory does not exist, exit with an error
+    if [ ! -d "$working_dir" ]; then
+        echo "Working directory $working_dir does not exist. Please check the configuration."
+        exit 1
+    fi
+    # Ensure the working directory ends with a slash
+    if [[ "$working_dir" != */ ]]; then
+        working_dir="$working_dir/"
+    fi
+    echo "Working directory: $working_dir"
+    # Create all the links
+    for d in "logs" "models" "output" "plots" "results"
+    do
+        # If the link already exists, do nothing
+        # If a file with the same name exists, skip it
+        if [ -e "$d" ]; then
+            echo "'$d' already exists, skipping. The results in $d will not be linked to the shared working directory."
+            continue
+        fi
+        echo "$d -> $working_dir$d"
+        ln -s "$working_dir$d" "$d"
+    done
+    )
+    ;;
+  create-jupyter-kernel)
+    (
+    cd "$SCRIPT_DIR" || exit 1
+    uv sync --all-packages
+    uv run ipython kernel install --user --env VIRTUAL_ENV $(pwd)/.venv --name=weathergen_kernel --display-name "Python (WeatherGenerator)"
+    echo "Jupyter kernel created. You can now use it in Jupyter Notebook or JupyterLab."
+    echo "To use this kernel, select 'Python (WeatherGenerator)' from the kernel options in Jupyter Notebook or JupyterLab."
+    echo "If you want to remove the kernel later, you can run:"
+    echo "jupyter kernelspec uninstall weathergen_kernel"
+    )
+    ;;
+  jupytext-sync)
+    (
+    cd "$SCRIPT_DIR" || exit 1
+    # Run on any python or jupyter notebook files in the WeatherGenerator-private/notebooks directory
+    uv run jupytext --set-formats ipynb,py:percent --sync ../WeatherGenerator-private/notebooks/*.ipynb ../WeatherGenerator-private/notebooks/*.py
+    echo "Jupytext sync completed."
+    )
+    ;;
+  *)
+    echo "Usage: $0 {sync|lint|lint-check|type-check|unit-test|toml-check|integration-test|create-links|create-jupyter-kernel|jupytext-sync}"
+    exit 1
+    ;;
+esac
diff --git a/scripts/check_gh_issue.py b/scripts/check_gh_issue.py
new file mode 100755
index 000000000..7465fbd1c
--- /dev/null
+++ b/scripts/check_gh_issue.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env -S uv run
+# /// script
+# dependencies = [ "BeautifulSoup4", "requests"
+# ]
+# [tool.uv]
+# exclude-newer = "2025-01-01T00:00:00Z"
+# ///
+
+# ruff: noqa: T201
+
+"""
+Checks that a pull request has a corresponding GitHub issue.
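+
+Illustrative usage (the PR number is just an example):
+
+    ./scripts/check_gh_issue.py 1295
+
+The script scrapes the PR's HTML page and exits with status 1 when no linked
+issue is found.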
+ +Source: +https://stackoverflow.com/questions/60717142/getting-linked-issues-and-projects-associated-with-a-pull-request-form-github-ap +""" + +import re + +import requests +from bs4 import BeautifulSoup + +repo = "ecmwf/WeatherGenerator" + +msg_template = """This pull request {pr} does not have a linked issue. +Please link it to an issue in the repository {repo} before merging. +The easiest way to do this is to add a comment with the issue number, like this: +Fixes #1234 +This will automatically link the issue to the pull request. + +If you just want to reference an issue without closing it, you can use: +Refs #1234 + +See https://docs.github.com/en/issues/tracking-your-work-with-issues/using-issues/linking-a-pull-request-to-an-issue +""" + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser(description="Check GitHub PR for linked issues.") + parser.add_argument("pr", type=str, help="Pull request number") + args = parser.parse_args() + + pr: str = args.pr + pr = pr.split("/")[0] + r = requests.get(f"https://github.com/{repo}/pull/{pr}") + soup = BeautifulSoup(r.text, "html.parser") + issue_form = soup.find_all("form", {"aria-label": re.compile("Link issues")}) + msg = msg_template.format(pr=pr, repo=repo) + + if not issue_form: + print(msg) + exit(1) + issues = [i["href"] for i in issue_form[0].find_all("a")] + issues = [i for i in issues if i is not None and repo in i] + print(f"Linked issues for PR {pr}:") + print(f"Found {len(issues)} linked issues.") + print("\n".join(issues)) + if not issues: + print(msg) + exit(1) diff --git a/scripts/check_tomls.py b/scripts/check_tomls.py new file mode 100644 index 000000000..cb709c42b --- /dev/null +++ b/scripts/check_tomls.py @@ -0,0 +1,65 @@ +#!/usr/bin/env -S uv run + +# ruff: noqa: T201 +""" +Checks that all pyproject.toml files are consistent for select sections +USAGE EXAMPLE: ./scripts/actions.sh toml-check from the root of the repo +""" + +import tomllib +from pathlib import Path + +_REPO_ROOT = Path(__file__).parent.parent + + +def loop_keys(toml_dict, list_keys): + for i in list_keys: + toml_dict = toml_dict[i] + return toml_dict + + +def check_toml_key(main_toml_dict, other_toml_dict, list_keys, name): + try: + main_value = loop_keys(dict(main_toml_dict), list_keys) + other_value = loop_keys(dict(other_toml_dict), list_keys) + assert main_value == other_value, ( + f"{list_keys} mismatch with main pyproject.toml and {name} pyproject.toml: ", + f"{main_value} != {other_value}", + ) + except Exception as e: + assert ( + type(e) is not KeyError + ), f"""KeyError: '{list_keys}' not found in {name} pyproject.toml, + please populate this field""" + print(e) + + +def check_tomls(main_toml, *tomls): + main_toml_dict = {} + with open(main_toml, "rb") as toml_file: + main_toml_dict = tomllib.load(toml_file) + all_tomls = {} + for toml in tomls: + toml_dict = {} + with open(toml, "rb") as toml_file: + toml_dict = tomllib.load(toml_file) + all_tomls[Path(toml)] = toml_dict + for toml_path, toml_dict in all_tomls.items(): + # shorten name to package path + name = toml_path.parent.name + # check build system is the same + check_toml_key(main_toml_dict, toml_dict, ["build-system"], name) + # check python version is the same + # check_toml_key(main_toml_dict, toml_dict, [], name) + # check project.version/authors/urls are the same + for key in ["version", "requires-python"]: + check_toml_key(main_toml_dict["project"], toml_dict["project"], [key], name) + # check tool.ruff is the same (disabled until issue 1081) + # 
check_toml_key(main_toml_dict, toml_dict, ["tool", "ruff"], name) + + +if __name__ == "__main__": + main_toml = _REPO_ROOT / "pyproject.toml" + sub_packages = ["evaluate", "common", "metrics", "readers_extra"] + tomls = [_REPO_ROOT / "packages" / package / "pyproject.toml" for package in sub_packages] + check_tomls(main_toml, *tomls) diff --git a/src/weathergen/__init__.py b/src/weathergen/__init__.py deleted file mode 100644 index 79198f307..000000000 --- a/src/weathergen/__init__.py +++ /dev/null @@ -1,204 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -import time -import sys -import pdb -import traceback - -import pandas as pd - -from weathergen.utils.config import Config -from weathergen.train.trainer import Trainer -from weathergen.train.utils import get_run_id - -#################################################################################################### -def evaluate( run_id, epoch, masking_mode = None, forecacast_steps = None, - samples = 10000000, shuffle=False, - save_samples=True, gridded_output_streams=[]) : - - # load config if specified - cf = Config.load( run_id, epoch if epoch is not None else -1) - - cf.run_history += [ (cf.run_id, cf.istep) ] - - cf.samples_per_validation = samples - cf.log_validation = samples if save_samples else 0 - - if masking_mode is not None : - cf.masking_mode = masking_mode - - # Oct-Nov 2022 - cf.start_date_val = 202210011600 - cf.end_date_val = 202212010400 - # # 2022 - # cf.start_date_val = 202201010400 - # cf.end_date_val = 202301010400 - - cf.step_hrs = 12 - - cf.shuffle = shuffle - - cf.forecast_steps = forecacast_steps if forecacast_steps else cf.forecast_steps - # cf.forecast_policy = 'fixed' - - # cf.analysis_streams_output = ['Surface', 'Air', 'METEOSAT', 'ATMS', 'IASI', 'AMSR2'] - cf.analysis_streams_output = ['ERA5'] - - # make sure number of loaders does not exceed requested samples - cf.loader_num_workers = min( cf.loader_num_workers, samples) - - trainer = Trainer() - trainer.evaluate( cf, run_id, epoch) - -#################################################################################################### -def train( run_id = None) -> None : - - cf = Config() - - # directory where input streams are specified - # cf.streams_directory = './streams_large/' - cf.streams_directory = './streams_anemoi/' - - # embed_orientation : 'channels' or 'columns' - # channels: embedding is per channel for a token (#tokens=num_channels) - # columns: embedding is per "column", all channels are embedded together (#tokens=token_size) - # the per-stream embedding paramters, in particular dim_embed, have to be chosen accordingly - cf.embed_orientation = 'channels' - cf.embed_local_coords = True - # False since per cell coords are meaningless for cells - cf.embed_centroids_local_coords = False - cf.embed_size_centroids = 64 - cf.embed_unembed_mode = 'block' - - cf.target_cell_local_prediction = True - cf.target_coords_local = True - - # parameters for local assimilation engine - cf.ae_local_dim_embed = 1024 #2048 #1024 - cf.ae_local_num_blocks = 2 - cf.ae_local_num_heads = 16 - cf.ae_local_dropout_rate = 0.1 - cf.ae_local_with_qk_lnorm = True - - # assimilation engine local -> 
global adapter - cf.ae_local_num_queries = 2 - cf.ae_local_queries_per_cell = False - cf.ae_adapter_num_heads = 16 - cf.ae_adapter_embed = 128 - cf.ae_adapter_with_qk_lnorm = True - cf.ae_adapter_with_residual = True - cf.ae_adapter_dropout_rate = 0.1 - - # parameters for global assimilation engine - cf.ae_global_dim_embed = 2048 - cf.ae_global_num_blocks = 8 - cf.ae_global_num_heads = 32 - cf.ae_global_dropout_rate = 0.1 - cf.ae_global_with_qk_lnorm = True - cf.ae_global_att_dense_rate = 0.2 # 0.25 : every 4-th block is dense attention - cf.ae_global_block_factor = 64 - cf.ae_global_mlp_hidden_factor = 2 - - cf.pred_adapter_kv = False - cf.pred_self_attention = True - cf.pred_dyadic_dims = False - cf.pred_mlp_adaln = True - - # forecasting engine - cf.forecast_delta_hrs = 0 - cf.forecast_steps = 0 # [j for j in range(1,11) for i in range(1)] - cf.forecast_policy = None #'fixed', 'sequential' - cf.forecast_freeze_model = False # False - cf.forecast_att_dense_rate = 0.25 - - cf.fe_num_blocks = 0 - cf.fe_num_heads = 16 - cf.fe_dropout_rate = 0.1 - cf.fe_with_qk_lnorm = True - - cf.healpix_level = 5 - - # working precision - cf.with_mixed_precision = True - cf.with_flash_attention = True - if cf.with_flash_attention : - assert cf.with_mixed_precision - # compile entire model - cf.compile_model = False - - cf.with_fsdp = True - - cf.loss_fcts = [['mse', 1.0]] - cf.loss_fcts_val = [['mse', 1.0]] - # cf.loss_fcts = [['mse', 0.5], ['stats', 0.5]] - # cf.loss_fcts_val = [['mse', 0.5], ['stats', 0.5]] - - cf.batch_size = 1 - cf.batch_size_validation = 1 - - # forecast - cf.masking_mode = 'forecast' - cf.masking_rate = 0.0 - cf.masking_rate_sampling = True #False - cf.sampling_rate_target = 1.0 - - cf.num_epochs = 24 - cf.samples_per_epoch = 4096 - cf.samples_per_validation = 512 - cf.shuffle = True - - cf.lr_scaling_policy = 'sqrt' - cf.lr_start = 0.000001 - cf.lr_max = 0.00003 - cf.lr_final_decay = 0.000001 - cf.lr_final = 0.0 - cf.lr_steps_warmup = 256 - cf.lr_steps_cooldown = 4096 - cf.lr_policy_warmup = 'cosine' - cf.lr_policy_decay = 'linear' - cf.lr_policy_cooldown = 'linear' - - cf.grad_clip = 5. - cf.weight_decay = 0.1 - cf.norm_type = 'LayerNorm' #'LayerNorm' #'RMSNorm' - cf.nn_module = 'te' - - cf.data_path = '/home/mlx/ai-ml/datasets/stable/' - # cf.data_path = '/lus/h2resw01/fws4/lb/project/ai-ml/observations/v1' - # cf.data_path = '/leonardo_scratch/large/userexternal/clessig0/obs/v1' - cf.start_date = 201301010000 - cf.end_date = 202012310000 - cf.start_date_val = 202101010000 - cf.end_date_val = 202201010000 - cf.len_hrs = 6 - cf.step_hrs = 6 - cf.input_window_steps = 1 - - cf.val_initial = False - - cf.loader_num_workers = 8 - cf.data_loader_rng_seed = int(time.time()) - cf.log_validation = 0 - - cf.mlflow_offline = True #False - cf.istep = 0 - cf.run_history = [] - - cf.run_id = run_id - cf.desc = '' - - trainer = Trainer( log_freq=20, checkpoint_freq=250, print_freq=10) - - try : - trainer.run( cf) - except : - extype, value, tb = sys.exc_info() - traceback.print_exc() - pdb.post_mortem(tb) diff --git a/src/weathergen/datasets/anemoi_dataset.py b/src/weathergen/datasets/anemoi_dataset.py deleted file mode 100644 index c4a25a9ba..000000000 --- a/src/weathergen/datasets/anemoi_dataset.py +++ /dev/null @@ -1,63 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. 
-# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -import code - -import numpy as np - -from anemoi.datasets import open_dataset - - -class AnemoiDataset(): - "Wrapper for Anemoi dataset" - - def __init__( - self, - filename: str, - start: int, - end: int, - len_hrs: int, - step_hrs: int = None, - normalize: bool = True, - select: list[str] = None, - ) -> None : - - assert len_hrs == step_hrs, 'Currently only step_hrs=len_hrs is supported' - - self.ds = open_dataset( filename, frequency=str(step_hrs) + 'h', - start=str(start)[:-4], end=str(end)[:-4] ) - # caches lats and lons - self.latitudes = self.ds.latitudes.astype( np.float32) - self.longitudes = self.ds.longitudes.astype( np.float32) - - self.colnames = ['lat', 'lon'] + self.ds.variables - - self.properties = { 'obs_id' : 0, - 'means' : self.ds.statistics['mean'], - 'vars' : np.square(self.ds.statistics['stdev']), } - - def __len__(self) : - "Length of dataset" - return len(self.ds) - - def __getitem__( self, idx: int) -> tuple : - "Get (data,datetime) for given index" - - # prepend lat and lon to data; squeeze out ensemble dimension (for the moment) - data = np.concatenate( [np.expand_dims( self.latitudes, 0), - np.expand_dims( self.longitudes, 0), - self.ds[idx].squeeze()], 0).transpose() - - # date time matching #data points of data - datetimes = np.full( data.shape[0], self.ds.dates[idx]) - - return (data, datetimes) - - def time_window(self, idx: int) -> tuple[np.datetime64, np.datetime64]: - return (self.ds.dates[idx], self.ds.dates[idx]) diff --git a/src/weathergen/datasets/batchifyer.py b/src/weathergen/datasets/batchifyer.py deleted file mode 100644 index 63689d508..000000000 --- a/src/weathergen/datasets/batchifyer.py +++ /dev/null @@ -1,270 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -import torch -import numpy as np -import code -import warnings -import time - -import astropy_healpix as hp -from astropy_healpix.healpy import ang2pix, pix2ang - -from functools import partial - -from weathergen.datasets.utils import ( - vecs_to_rots, - s2tor3, - r3tos2, - locs_to_cell_coords, - coords_to_hpyidxs, - healpix_verts, - get_target_coords_local, - get_target_coords_local_fast, - get_target_coords_local_ffast, - healpix_verts_rots, - locs_to_cell_coords_ctrs -) - - -############################################# -def tokenize_window_space( source, times, normalize_coords, tokens_cells, - token_size, hl, geoinfo_offset, hpy_verts_Rs, rng, mr) : - '''Process one window into tokens''' - - # len(source)==1 would require special case handling that is not worth the effort - if len(source) < 2 : - return tokens_cells - - thetas = (((90. - source[:,geoinfo_offset] ) / 180.) * np.pi) - phis = ((((source[:,geoinfo_offset+1] + 180.) / 360.) * 2. 
* np.pi)) - posr3 = s2tor3( thetas, phis) - hpy_idxs = ang2pix( 2**hl, thetas, phis, nest=True) - - hpy_idxs_ord = torch.argsort( torch.from_numpy(hpy_idxs), stable=True) - splits = np.flatnonzero( np.diff( hpy_idxs[hpy_idxs_ord])) - cells_idxs = np.concatenate( [hpy_idxs[hpy_idxs_ord][ splits ], - np.array([hpy_idxs[hpy_idxs_ord[-1]]]) ]) - hpy_idxs_ord_split = np.split( hpy_idxs_ord, splits+1) - - lens = [] - for i,c in enumerate( cells_idxs) : - - thetas_sorted = torch.argsort( thetas[ hpy_idxs_ord_split[i] ], stable=True) - posr3_cell = posr3[ hpy_idxs_ord_split[i] ][ thetas_sorted ] - source_cell = source[ hpy_idxs_ord_split[i] ][ thetas_sorted ] - - R = hpy_verts_Rs[c] - local_coords = r3tos2( torch.matmul( R, posr3_cell.transpose(1,0)).transpose(1,0)) - source_cell[:,geoinfo_offset:geoinfo_offset+2] = local_coords.to(torch.float32) - source_cell = normalize_coords( source_cell, False) - - # split into tokens and pad last one to have full size - pad = token_size - (len(source_cell)%token_size) if len(source_cell)%token_size>0 else 0 - source_cell = torch.nn.functional.pad( source_cell, (0,0,0,pad), mode='constant', value=0.) - source_cell = source_cell.reshape( (len(source_cell)//token_size, token_size, -1)) - - # apply masking (discarding) of tokens - if mr > 0. : - idx_sel = rng.permutation( len(source_cell))[ : max(1,int((1.-mr) * len(source_cell)))] - source_cell = source_cell[ idx_sel ] - - tokens_cells[c] += [ source_cell ] - - return tokens_cells - -############################################# -def tokenize_window_spacetime( source, times, normalize_coords, tokens_cells, - token_size, hl, geoinfo_offset, hpy_verts_Rs, rng, mr) : - - t_unique = np.unique( times) - for i, t in enumerate( t_unique) : - mask = t == times - tokens_cells = tokenize_window_space( source[mask], None, normalize_coords, - tokens_cells, token_size, hl, geoinfo_offset, - hpy_verts_Rs, rng, mr) - - return tokens_cells - - -#################################################################################################### -class Batchifyer : - - def __init__(self, hl) : - - ref = torch.tensor( [1., 0., 0.]) - - self.hl_source = hl - self.hl_target = hl - - self.num_healpix_cells_source = 12 * 4**self.hl_source - self.num_healpix_cells_target = 12 * 4**self.hl_target - - verts00, verts00_Rs = healpix_verts_rots( self.hl_source, 0.0, 0.0) - verts10, verts10_Rs = healpix_verts_rots( self.hl_source, 1.0, 0.0) - verts11, verts11_Rs = healpix_verts_rots( self.hl_source, 1.0, 1.0) - verts01, verts01_Rs = healpix_verts_rots( self.hl_source, 0.0, 1.0) - vertsmm, vertsmm_Rs = healpix_verts_rots( self.hl_source, 0.5, 0.5) - self.hpy_verts = [verts00.to(torch.float32), verts10.to(torch.float32), - verts11.to(torch.float32), verts01.to(torch.float32), - vertsmm.to(torch.float32)] - self.hpy_verts_Rs_source = [verts00_Rs.to(torch.float32), verts10_Rs.to(torch.float32), - verts11_Rs.to(torch.float32), verts01_Rs.to(torch.float32), - vertsmm_Rs.to(torch.float32)] - - verts00, verts00_Rs = healpix_verts_rots( self.hl_target, 0.0, 0.0) - verts10, verts10_Rs = healpix_verts_rots( self.hl_target, 1.0, 0.0) - verts11, verts11_Rs = healpix_verts_rots( self.hl_target, 1.0, 1.0) - verts01, verts01_Rs = healpix_verts_rots( self.hl_target, 0.0, 1.0) - vertsmm, vertsmm_Rs = healpix_verts_rots( self.hl_target, 0.5, 0.5) - self.hpy_verts = [verts00.to(torch.float32), verts10.to(torch.float32), - verts11.to(torch.float32), verts01.to(torch.float32), - vertsmm.to(torch.float32)] - self.hpy_verts_Rs_target = 
[verts00_Rs.to(torch.float32), verts10_Rs.to(torch.float32), - verts11_Rs.to(torch.float32), verts01_Rs.to(torch.float32), - vertsmm_Rs.to(torch.float32)] - - self.verts_local = [] - verts = torch.stack( [verts10, verts11, verts01, vertsmm]) - temp = ref - torch.stack(locs_to_cell_coords_ctrs( verts00_Rs, verts.transpose(0,1))) - self.verts_local.append( temp.flatten(1,2)) - - verts = torch.stack( [verts00, verts11, verts01, vertsmm]) - temp = ref - torch.stack(locs_to_cell_coords_ctrs( verts10_Rs, verts.transpose(0,1))) - self.verts_local.append( temp.flatten(1,2)) - - verts = torch.stack( [verts00, verts10, verts01, vertsmm]) - temp = ref - torch.stack(locs_to_cell_coords_ctrs( verts11_Rs, verts.transpose(0,1))) - self.verts_local.append( temp.flatten(1,2)) - - verts = torch.stack( [verts00, verts11, verts10, vertsmm]) - temp = ref - torch.stack(locs_to_cell_coords_ctrs( verts01_Rs, verts.transpose(0,1))) - self.verts_local.append( temp.flatten(1,2)) - - verts = torch.stack( [verts00, verts10, verts11, verts01]) - temp = ref - torch.stack(locs_to_cell_coords_ctrs( vertsmm_Rs, verts.transpose(0,1))) - self.verts_local.append( temp.flatten(1,2)) - - self.hpy_verts_local_target = torch.stack( self.verts_local).transpose(0,1) - - # add local coords wrt to center of neighboring cells - # (since the neighbors are used in the prediction) - num_healpix_cells = 12 * 4**self.hl_target - with warnings.catch_warnings(action="ignore"): - temp = hp.neighbours( np.arange(num_healpix_cells), 2**self.hl_target, order='nested').transpose() - # fix missing nbors with references to self - for i, row in enumerate(temp) : - temp[i][row == -1] = i - self.hpy_nctrs_target = vertsmm[temp.flatten()].reshape( (num_healpix_cells, 8, 3)).transpose(1,0).to(torch.float32) - - self.rng = np.random.default_rng() - - ############################################## - def batchify_source( self, stream_info, geoinfo_offset, geoinfo_size, - masking_rate, masking_rate_sampling, rng, - source, times, normalize_coords) : - - si = stream_info - token_size = si['token_size'] - is_diagnostic = si['diagnostic'] if 'diagnostic' in stream_info else False - tokenize_spacetime = si['tokenize_spacetime'] if 'tokenize_spacetime' in stream_info else False - - if masking_rate > 0. : - # adjust if there's a per-stream masking rate - masking_rate = si['masking_rate'] if 'masking_rate' in si else masking_rate - # mask either patches or entire stream - if masking_rate_sampling : - # masking_rate = self.rng.uniform( low=0., high=masking_rate) - masking_rate = np.clip( np.abs( self.rng.normal( loc=0., scale=1./np.pi)), 0., 1.0) - else : - masking_rate = 1.0 if self.rng.uniform() < masking_rate else 0. 
- - tokenize_window = partial( tokenize_window_space, token_size=token_size, hl=self.hl_source, - geoinfo_offset=geoinfo_offset, - hpy_verts_Rs=self.hpy_verts_Rs_source[-1]) - if tokenize_spacetime : - tokenize_window = partial( tokenize_window_spacetime, token_size=token_size, hl=self.hl_source, - geoinfo_offset=geoinfo_offset, - hpy_verts_Rs=self.hpy_verts_Rs_source[-1]) - - # source - - if is_diagnostic or len(source)<2 or masking_rate==1.0 : - source_tokens_cells = torch.tensor([]) - source_centroids = torch.tensor([]) - source_tokens_lens = torch.zeros([self.num_healpix_cells_source],dtype=torch.int32) - - else : - - source_tokens_cells = [[] for _ in range(self.num_healpix_cells_source)] - source_tokens_cells = tokenize_window( source, times, normalize_coords, source_tokens_cells, - rng=self.rng, mr=masking_rate) - - source_tokens_cells = [torch.cat(c) if len(c)>0 else torch.tensor([]) for c in source_tokens_cells] - source_tokens_lens = torch.tensor( [len(s) for s in source_tokens_cells], dtype=torch.int32) - - if source_tokens_lens.sum() > 0 : - source_means = [self.hpy_verts[-1][i].unsqueeze(0).repeat(len(s),1) - if len(s)>0 else torch.tensor([]) - for i,s in enumerate(source_tokens_cells)] - source_means_lens = [len(s) for s in source_means] - # merge and split to vectorize computations - source_means = torch.cat( source_means) - # TODO: precompute also source_means_r3 and then just cat - source_centroids = torch.cat( [ source_means.to(torch.float32), - r3tos2( source_means).to(torch.float32)], -1) - source_centroids = torch.split( source_centroids, source_means_lens) - else : - source_centroids = torch.tensor([]) - - return (source_tokens_cells, source_tokens_lens, source_centroids) - - ############################################## - def batchify_target( self, stream_info, geoinfo_offset, geoinfo_size, - sampling_rate_target, rng, source, times2, normalize_targets ) : - - if len(source) < 2 : - target_tokens, target_coords = torch.tensor([]), torch.tensor([]) - target_tokens_lens = torch.zeros([self.num_healpix_cells_target],dtype=torch.int32) - target_coords_lens = torch.zeros([self.num_healpix_cells_target],dtype=torch.int32) - - else : - - thetas = ((90. - source[:,geoinfo_offset] ) / 180.) * np.pi - phis = (((source[:,geoinfo_offset+1] + 180.) / 360.) * 2. 
* np.pi) - hpy_idxs = ang2pix( 2**self.hl_target, thetas, phis, nest=True) - hpy_idxs_ord = np.argsort( hpy_idxs) - - # extract per cell data - splits = np.flatnonzero( np.diff( hpy_idxs[hpy_idxs_ord])) - cells_idxs = np.concatenate( [hpy_idxs[hpy_idxs_ord][ splits ], - np.array([hpy_idxs[hpy_idxs_ord[-1]]]) ]) - hpy_idxs_ord_split = np.split( hpy_idxs_ord, splits+1) - - target_tokens = [torch.tensor([]) for _ in range(self.num_healpix_cells_target)] - target_coords = [torch.tensor([]) for _ in range(self.num_healpix_cells_target)] - for i,c in enumerate( cells_idxs) : - t = source[ hpy_idxs_ord_split[i] ] - t = t[ self.rng.permutation( len(t)) ][: int(len(t)*sampling_rate_target)] - target_tokens[c] = t - # target_coords[c] = normalize_coords(t[:,:geoinfo_size].clone(), False) - target_coords[c] = normalize_targets( t[:,:geoinfo_size].clone()) - - target_tokens_lens = torch.tensor( [len(s) for s in target_tokens], dtype=torch.int32) - target_coords_lens = target_tokens_lens.detach().clone() - - # if target_coords_local and target_tokens_lens.sum()>0 : - if target_tokens_lens.sum()>0 : - target_coords = get_target_coords_local_ffast( self.hl_target, target_coords, geoinfo_offset, - self.hpy_verts_Rs_target, self.hpy_verts_local_target, - self.hpy_nctrs_target ) - target_coords.requires_grad = False - target_coords = list(target_coords.split( target_coords_lens.tolist())) - - return (target_tokens, target_tokens_lens, target_coords, target_coords_lens) diff --git a/src/weathergen/datasets/data_reader_anemoi.py b/src/weathergen/datasets/data_reader_anemoi.py new file mode 100644 index 000000000..7b6758a4f --- /dev/null +++ b/src/weathergen/datasets/data_reader_anemoi.py @@ -0,0 +1,270 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +import logging +from pathlib import Path +from typing import override + +import anemoi.datasets as anemoi_datasets +import numpy as np +from anemoi.datasets.data import MissingDateError +from anemoi.datasets.data.dataset import Dataset +from numpy.typing import NDArray + +from weathergen.datasets.data_reader_base import ( + DataReaderTimestep, + ReaderData, + TimeWindowHandler, + TIndex, + check_reader_data, + str_to_timedelta, +) + +_logger = logging.getLogger(__name__) + + +class DataReaderAnemoi(DataReaderTimestep): + "Wrapper for Anemoi datasets" + + def __init__( + self, + tw_handler: TimeWindowHandler, + filename: Path, + stream_info: dict, + ) -> None: + """ + Construct data reader for anemoi dataset + + Parameters + ---------- + filename : + filename (and path) of dataset + stream_info : + information about stream + + Returns + ------- + None + """ + + # open dataset to peak that it is compatible with requested parameters + ds0: Dataset = anemoi_datasets.open_dataset(filename) + # If there is no overlap with the time range, the dataset will be empty + if tw_handler.t_start >= ds0.dates[-1] or tw_handler.t_end <= ds0.dates[0]: + name = stream_info["name"] + _logger.warning(f"{name} is not supported over data loader window. 
Stream is skipped.") + super().__init__(tw_handler, stream_info) + self.init_empty() + return + + kwargs = {} + if "frequency" in stream_info: + kwargs["frequency"] = str_to_timedelta(stream_info["frequency"]) + if "subsampling_rate" in stream_info: + name = stream_info["name"] + _logger.warning( + f"subsampling_rate specified for anemoi dataset for stream {name}. " + + "Use frequency instead." + ) + ds: Dataset = anemoi_datasets.open_dataset( + ds0, **kwargs, start=tw_handler.t_start, end=tw_handler.t_end + ) + + period = np.timedelta64(ds.frequency) + data_start_time = ds.dates[0] + data_end_time = ds.dates[-1] + assert data_start_time is not None and data_end_time is not None, ( + data_start_time, + data_end_time, + ) + super().__init__( + tw_handler, + stream_info, + data_start_time, + data_end_time, + period, + ) + # If there is no overlap with the time range, no need to keep the dataset. + if tw_handler.t_start >= data_end_time or tw_handler.t_end <= data_start_time: + self.init_empty() + return + else: + self.ds = ds + self.len = len(ds) + + # caches lats and lons + self.latitudes = _clip_lat(ds.latitudes) + self.longitudes = _clip_lon(ds.longitudes) + + # select/filter requested source channels + self.source_idx = self.select_channels(ds0, "source") + self.source_channels = [ds.variables[i] for i in self.source_idx] + + # select/filter requested target channels + self.target_idx = self.select_channels(ds0, "target") + self.target_channels = [ds.variables[i] for i in self.target_idx] + + # get target channel weights from stream config + self.target_channel_weights = self.parse_target_channel_weights() + + self.geoinfo_channels = [] + self.geoinfo_idx = [] + + ds_name = stream_info["name"] + _logger.info(f"{ds_name}: source channels: {self.source_channels}") + _logger.info(f"{ds_name}: target channels: {self.target_channels}") + _logger.info(f"{ds_name}: geoinfo channels: {self.geoinfo_channels}") + + self.properties = { + "stream_id": 0, + } + self.mean = ds.statistics["mean"] + self.stdev = ds.statistics["stdev"] + + @override + def init_empty(self) -> None: + super().init_empty() + self.ds = None + self.len = 0 + + @override + def length(self) -> int: + return self.len + + @override + def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: + """ + Get data for window (for either source or target, through public interface) + + Parameters + ---------- + idx : int + Index of temporal window + channels_idx : np.array + Selection of channels + + Returns + ------- + ReaderData providing coords, geoinfos, data, datetimes + """ + + (t_idxs, dtr) = self._get_dataset_idxs(idx) + + if self.ds is None or self.len == 0 or len(t_idxs) == 0: + return ReaderData.empty( + num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) + ) + + assert t_idxs[0] >= 0, "index must be non-negative" + didx_start = t_idxs[0] + # End is inclusive + didx_end = t_idxs[-1] + 1 + + # extract number of time steps and collapse ensemble dimension + # ds is a wrapper around zarr with get_coordinate_selection not being exposed since + # subsetting is pushed to the ctor via frequency argument; this also ensures that no sub- + # sampling is required here + try: + data = self.ds[didx_start:didx_end][:, :, 0].astype(np.float32) + except MissingDateError as e: + _logger.debug(f"Date not present in anemoi dataset: {str(e)}. 
Skipping.")
+            return ReaderData.empty(
+                num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx)
+            )
+
+        # extract channels
+        data = (
+            data[:, list(channels_idx)]
+            .transpose([0, 2, 1])
+            .reshape((data.shape[0] * data.shape[2], -1))
+        )
+
+        # construct lat/lon coords
+        latlon = np.concatenate(
+            [
+                np.expand_dims(self.latitudes, 0),
+                np.expand_dims(self.longitudes, 0),
+            ],
+            axis=0,
+        ).transpose()
+        # repeat latlon len(t_idxs) times
+        coords = np.vstack((latlon,) * len(t_idxs))
+
+        # empty geoinfos for anemoi
+        geoinfos = np.zeros((len(data), 0), dtype=data.dtype)
+
+        # datetimes matching the number of data points,
+        # assuming a fixed frequency for the dataset
+        datetimes = np.repeat(self.ds.dates[didx_start:didx_end], len(data) // len(t_idxs))
+
+        rd = ReaderData(
+            coords=coords,
+            geoinfos=geoinfos,
+            data=data,
+            datetimes=datetimes,
+        )
+        check_reader_data(rd, dtr)
+
+        return rd
+
+    def select_channels(self, ds0: Dataset, ch_type: str) -> NDArray[np.int64]:
+        """
+        Select source or target channels; channels are matched by substring.
+
+        Parameters
+        ----------
+        ds0 :
+            raw anemoi dataset with available channels
+        ch_type :
+            "source" or "target", i.e. channel type to select
+
+        Returns
+        -------
+        Sorted array of indices of the selected channels
+
+        """
+
+        channels = self.stream_info.get(ch_type)
+        channels_exclude = self.stream_info.get(ch_type + "_exclude", [])
+        # sanity check
+        is_empty = len(channels) == 0 if channels is not None else False
+        if is_empty:
+            stream_name = self.stream_info["name"]
+            _logger.warning(f"No channel for {stream_name} for {ch_type}.")
+
+        chs_idx = np.sort(
+            [
+                ds0.name_to_index[k]
+                for (k, v) in ds0.typed_variables.items()
+                if (
+                    not v.is_computed_forcing
+                    and not v.is_constant_in_time
+                    and (
+                        np.array([f in k for f in channels]).any() if channels is not None else True
+                    )
+                    and not np.array([f in k for f in channels_exclude]).any()
+                )
+            ]
+        )
+
+        return np.array(chs_idx, dtype=np.int64)
+
+
+def _clip_lat(lats: NDArray) -> NDArray[np.float32]:
+    """
+    Fold latitudes back into the range [-90, 90]; values beyond the poles are mirrored.
+    """
+    return (2 * np.clip(lats, -90.0, 90.0) - lats).astype(np.float32)
+
+
+def _clip_lon(lons: NDArray) -> NDArray[np.float32]:
+    """
+    Wrap longitudes periodically into the range [-180, 180).
+    """
+    return ((lons + 180.0) % 360.0 - 180.0).astype(np.float32)
diff --git a/src/weathergen/datasets/data_reader_base.py b/src/weathergen/datasets/data_reader_base.py
new file mode 100644
index 000000000..2b1bc9509
--- /dev/null
+++ b/src/weathergen/datasets/data_reader_base.py
@@ -0,0 +1,761 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
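+
+# Module overview (added comment): this file provides the reader-side time
+# plumbing shared by all datasets: TimeWindowHandler maps a window index to a
+# half-open [start, end) datetime range, ReaderData bundles
+# (coords, geoinfos, data, datetimes), and DataReaderBase / DataReaderTimestep
+# are the abstract classes concrete readers derive from.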
+ +import datetime +import logging +from abc import abstractmethod +from dataclasses import dataclass + +import numpy as np +import pandas as pd +from numpy import datetime64, timedelta64 +from numpy.typing import NDArray + +from weathergen.utils.better_abc import ABCMeta, abstract_attribute + +_logger = logging.getLogger(__name__) + +# The numpy date time 64 time (nanosecond precision) +type NPDT64 = datetime64 +# The numpy delta time 64 time (nanosecond precision) +type NPTDel64 = timedelta64 + +type DType = np.float32 # The type for the data in the datasets. + +""" +The type for indexing into datasets. It is a multiple of hours. +""" +type TIndex = np.int64 + + +_DT_ZERO = np.datetime64("1850-01-01T00:00") + + +@dataclass +class TimeIndexRange: + """ + Defines a time window for indexing into datasets. + + It is defined as number of hours since the start of the dataset. + """ + + start: TIndex + end: TIndex + + +@dataclass +class DTRange: + """ + Defines a time window for indexing into datasets. + + It is defined as numpy datetime64 objects. + """ + + start: NPDT64 + end: NPDT64 + + def __post_init__(self): + assert self.start < self.end, "start time must be before end time" + assert self.start > _DT_ZERO, "start time must be after 1850-01-01T00:00" + + +def str_to_datetime64(s: str | int | NPDT64) -> NPDT64: + """ + Convert a string to a numpy datetime64 object. + """ + if isinstance(s, datetime64): + return s + s_str = str(s) + + supported_formats = [ + "%Y%m%d%H%M%S", + "%Y-%m-%d %H:%M:%S", + "%Y-%m-%d %H:%M", + "%Y-%m-%dT%H:%M:%S", + "%Y-%m-%dT%H:%M", + ] + + for fmt in supported_formats: + try: + dt_obj = datetime.datetime.strptime(s_str, fmt) + return np.datetime64(dt_obj) + except ValueError: + pass + + raise ValueError(f"Unable to parse the date string '{s}'. Original string might be invalid.") + + +def str_to_timedelta(s: str | datetime.timedelta) -> pd.Timedelta: + """ + Convert a string or datetime.timedelta object to a pd.Timedelta object. + The string format is expected to be "HH:MM:SS". + Hours are not limited to two digits. Minutes and seconds must be in the range 0-59. 
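+
+    Illustrative examples (both go through pd.to_timedelta):
+
+    >>> str_to_timedelta("06:00:00")
+    Timedelta('0 days 06:00:00')
+    >>> str_to_timedelta("120:00:00")
+    Timedelta('5 days 00:00:00')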
+    """
+
+    if not isinstance(s, str) and not isinstance(s, datetime.timedelta):
+        raise TypeError("Input must be a string or a datetime.timedelta object")
+    if isinstance(s, datetime.timedelta):
+        # If input is a timedelta object, convert it directly to pd.Timedelta
+        return pd.Timedelta(s)
+    if isinstance(s, str):
+        # ensure that the string is in "HH:MM:SS" format
+        parts = s.split(":")
+        if not len(parts) == 3:
+            raise ValueError("String must be in 'HH:MM:SS' format")
+        if not all(part.isdigit() for part in parts):
+            raise ValueError("String must be in 'HH:MM:SS' format")
+        # ensure that minutes and seconds do not exceed 59
+        if int(parts[1]) > 59 or int(parts[2]) > 59:
+            raise ValueError("Minutes and seconds must be in the range 0-59")
+        return pd.to_timedelta(s)
+
+
+class TimeWindowHandler:
+    """
+    Handler for time windows and translation of indices to times
+    """
+
+    def __init__(
+        self,
+        t_start: str | int | NPDT64,
+        t_end: str | int | NPDT64,
+        t_window_len_hours: int,
+        t_window_step_hours: int,
+    ):
+        """
+        Parameters
+        ----------
+        t_start :
+            start time
+        t_end :
+            end time
+        t_window_len_hours :
+            length of data window in hours
+        t_window_step_hours :
+            hours between start times of consecutive windows
+
+        """
+        self.t_start: NPDT64 = str_to_datetime64(t_start)
+        self.t_end: NPDT64 = str_to_datetime64(t_end)
+        self.t_window_len: NPTDel64 = np.timedelta64(t_window_len_hours, "h")
+        self.t_window_step: NPTDel64 = np.timedelta64(t_window_step_hours, "h")
+
+        assert self.t_start < self.t_end, "start datetime has to be before end datetime"
+        assert self.t_start > _DT_ZERO, "start datetime has to be >= 1850-01-01T00:00."
+
+    def get_index_range(self) -> TimeIndexRange:
+        """
+        Range of valid temporal window indices
+
+        Returns
+        -------
+        TimeIndexRange with the first and last window index
+        """
+
+        idx_start: TIndex = np.int64(0)
+        idx_end = np.int64((self.t_end - self.t_start) // self.t_window_step)
+        assert idx_start <= idx_end, f"time window idxs invalid: {idx_start} <= {idx_end}"
+
+        return TimeIndexRange(idx_start, idx_end)
+
+    def window(self, idx: TIndex) -> DTRange:
+        """
+        Temporal window corresponding to index
+
+        Parameters
+        ----------
+        idx :
+            index of temporal window
+
+        Returns
+        -------
+        start and end of temporal window
+        """
+
+        t_start_win = self.t_start + self.t_window_step * idx
+        t_end_win = t_start_win + self.t_window_len
+
+        return DTRange(t_start_win, t_end_win)
+
+
+@dataclass
+class ReaderData:
+    """
+    Wrapper for return values from DataReader.get_source and DataReader.get_target
+    """
+
+    coords: NDArray[DType]
+    geoinfos: NDArray[DType]
+    data: NDArray[DType]
+    datetimes: NDArray[NPDT64]
+
+    @staticmethod
+    def empty(num_data_fields: int, num_geo_fields: int) -> "ReaderData":
+        """
+        Create an empty ReaderData object
+
+        Returns
+        -------
+        ReaderData
+            Empty ReaderData object
+        """
+        return ReaderData(
+            coords=np.zeros((0, 2), dtype=np.float32),
+            geoinfos=np.zeros((0, num_geo_fields), dtype=np.float32),
+            data=np.zeros((0, num_data_fields), dtype=np.float32),
+            datetimes=np.zeros((0,), dtype=np.datetime64),
+        )
+
+    def is_empty(self):
+        return self.len() == 0
+
+    def len(self):
+        """
+        Length of data
+
+        Returns
+        -------
+        length of data
+        """
+        return len(self.data)
+
+    def remove_nan_coords(self) -> "ReaderData":
+        """
+        Remove all data points where coords are NaN
+
+        Returns
+        -------
+        new ReaderData with the affected points removed
+        """
+        idx_valid = ~np.isnan(self.coords)
+        # keep a point only if both of its coords are valid (drop if either is NaN)
+        idx_valid =
np.logical_and(idx_valid[:, 0], idx_valid[:, 1]) + + # apply + return ReaderData( + self.coords[idx_valid], + self.geoinfos[idx_valid], + self.data[idx_valid], + self.datetimes[idx_valid], + ) + + +def check_reader_data(rdata: ReaderData, dtr: DTRange) -> None: + """ + Check that ReaderData is valid + + Parameters + ---------- + rdata : + ReaderData to check + dtr : + datetime range of window for which the rdata is valid + + Returns + ------- + None + """ + + assert rdata.coords.ndim == 2, f"coords must be 2D {rdata.coords.shape}" + assert rdata.coords.shape[1] == 2, ( + f"coords must have 2 columns (lat, lon), got {rdata.coords.shape}" + ) + assert rdata.geoinfos.ndim == 2, f"geoinfos must be 2D, got {rdata.geoinfos.shape}" + assert rdata.data.ndim == 2, f"data must be 2D {rdata.data.shape}" + assert rdata.datetimes.ndim == 1, f"datetimes must be 1D {rdata.datetimes.shape}" + + assert rdata.coords.shape[0] == rdata.data.shape[0], "coords and data must have same length" + assert rdata.geoinfos.shape[0] == rdata.data.shape[0], "geoinfos and data must have same length" + + # Check that all fields have the same length + assert ( + rdata.coords.shape[0] + == rdata.geoinfos.shape[0] + == rdata.data.shape[0] + == rdata.datetimes.shape[0] + ), ( + f"coords, geoinfos, data and datetimes must have the same length " + f"{rdata.coords.shape[0]}, {rdata.geoinfos.shape[0]}, {rdata.data.shape[0]}, " + f"{rdata.datetimes.shape[0]}" + ) + + assert np.logical_and(rdata.datetimes >= dtr.start, rdata.datetimes < dtr.end).all(), ( + f"datetimes for data points violate window {dtr}." + ) + + +class DataReaderBase(metaclass=ABCMeta): + """ + Base class for data readers. + + Coordinates must be provided in standard geographical format: + latitude in degrees from -90 (South) to +90 (North), + and longitude in degrees from -180 (West) to +180 (East). + """ + + # The fields that need to be set by the child classes + source_channels: list[str] = abstract_attribute() + target_channels: list[str] = abstract_attribute() + geoinfo_channels: list[str] = abstract_attribute() + source_idx: list[int] = abstract_attribute() + target_idx: list[int] = abstract_attribute() + geoinfo_idx: list[int] = abstract_attribute() + target_channel_weights: list[float] = abstract_attribute() + + def __init__( + self, + tw_handler: TimeWindowHandler, + stream_info: dict, + ) -> None: + """ + Parameters + ---------- + tw_handler : + time window handler + stream_info : + information about stream + + Returns + ------- + None + """ + + self.time_window_handler = tw_handler + self.stream_info = stream_info + self.target_channel_weights = None + + def init_empty(self) -> None: + """ + Initialize + """ + + self.source_channels = [] + self.target_channels = [] + self.geoinfo_channels = [] + self.source_idx = [] + self.target_idx = [] + self.geoinfo_idx = [] + self.target_channel_weights = [] + + self.mean = np.zeros(0) + self.stdev = np.ones(0) + self.mean_geoinfo = np.zeros(0) + self.stdev_geoinfo = np.ones(0) + + @abstractmethod + def length(self) -> int: + """The length of this dataset. 
Must be constant.""" + pass + + def __len__(self) -> int: + """ + Length of dataset + + Parameters + ---------- + None + + Returns + ------- + length of dataset + """ + + return self.length() + + def get_source(self, idx: TIndex) -> ReaderData: + """ + Get source data for idx + + Parameters + ---------- + idx : int + Index of temporal window + + Returns + ------- + source data (coords, geoinfos, data, datetimes) + """ + + rdata = self._get(idx, self.source_idx) + + return rdata + + def get_target(self, idx: TIndex) -> ReaderData: + """ + Get target data for idx + + Parameters + ---------- + idx : int + Index of temporal window + + Returns + ------- + target data (coords, geoinfos, data, datetimes) + """ + + rdata = self._get(idx, self.target_idx) + + return rdata + + @abstractmethod + def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: + """ + Get data for window + + Parameters + ---------- + idx : int + Index of temporal window + channels_idx : np.array + Selection of channels + + Returns + ------- + data (coords, geoinfos, data, datetimes) + """ + + raise NotImplementedError() + + def get_source_num_channels(self) -> int: + """ + Get number of source channels + + Parameters + ---------- + None + + Returns + ------- + number of source channels + """ + return len(self.source_idx) + + def get_target_num_channels(self) -> int: + """ + Get number of target channels + + Parameters + ---------- + None + + Returns + ------- + number of target channels + """ + return len(self.target_idx) + + def get_coords_size(self) -> int: + """ + Get size of coords + + Parameters + ---------- + None + + Returns + ------- + size of coords + """ + return 2 + + def get_geoinfo_size(self) -> int: + """ + Get size of geoinfos + + Parameters + ---------- + None + + Returns + ------- + size of geoinfos + """ + return len(self.geoinfo_idx) + + def parse_target_channel_weights( + self, + ) -> list[float] | None: + target_channel_weights = [ + self.stream_info["channel_weights"].get(ch, 1.0) + if self.stream_info.get("channel_weights", None) + else 1.0 + for ch in self.target_channels + ] + + if self.stream_info.get("channel_weights", None) is not None: + # Check whether all given channel_weights could be matched to a channel. 
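+            # (Illustrative stream-config shape, hypothetical channel names:
+            #      channel_weights:
+            #        2t: 2.0
+            #        10u: 0.5
+            #  channels without an entry default to weight 1.0.)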
+ ch_unmatched = [ + ch for ch in self.stream_info["channel_weights"] if ch not in self.target_channels + ] + if len(ch_unmatched) > 0: + _logger.info( + f"Unmatched channel_weights in {self.stream_info.name}: {ch_unmatched}" + ) + + return target_channel_weights + + def normalize_coords(self, coords: NDArray[DType]) -> NDArray[DType]: + """ + Normalize coordinates + + Parameters + ---------- + coords : + coordinates to be normalized + + Returns + ------- + Normalized coordinates + """ + coords[..., 0] = np.sin(np.deg2rad(coords[..., 0])) + coords[..., 1] = np.sin(0.5 * np.deg2rad(coords[..., 1])) + + return coords + + def normalize_geoinfos(self, geoinfos: NDArray[DType]) -> NDArray[DType]: + """ + Normalize geoinfos + + Parameters + ---------- + geoinfos : + geoinfos to be normalized + + Returns + ------- + Normalized geoinfo + """ + + assert geoinfos.shape[-1] == len(self.geoinfo_idx), "incorrect number of geoinfo channels" + for i, _ in enumerate(self.geoinfo_idx): + geoinfos[..., i] = (geoinfos[..., i] - self.mean_geoinfo[i]) / self.stdev_geoinfo[i] + + return geoinfos + + def normalize_source_channels(self, source: NDArray[DType]) -> NDArray[DType]: + """ + Normalize source channels + + Parameters + ---------- + data : + data to be normalized + + Returns + ------- + Normalized data + """ + assert source.shape[-1] == len(self.source_idx), "incorrect number of source channels" + for i, ch in enumerate(self.source_idx): + source[..., i] = (source[..., i] - self.mean[ch]) / self.stdev[ch] + + return source + + def normalize_target_channels(self, target: NDArray[DType]) -> NDArray[DType]: + """ + Normalize target channels + + Parameters + ---------- + data : + data to be normalized + + Returns + ------- + Normalized data + """ + assert target.shape[-1] == len(self.target_idx), "incorrect number of target channels" + for i, ch in enumerate(self.target_idx): + target[..., i] = (target[..., i] - self.mean[ch]) / self.stdev[ch] + + return target + + def denormalize_source_channels(self, source: NDArray[DType]) -> NDArray[DType]: + """ + Denormalize source channels + + Parameters + ---------- + data : + data to be denormalized + + Returns + ------- + Denormalized data + """ + assert source.shape[-1] == len(self.source_idx), "incorrect number of source channels" + for i, ch in enumerate(self.source_idx): + source[..., i] = (source[..., i] * self.stdev[ch]) + self.mean[ch] + + return source + + def denormalize_target_channels(self, data: NDArray[DType]) -> NDArray[DType]: + """ + Denormalize target channels + + Parameters + ---------- + data : + data to be denormalized (target or pred) + + Returns + ------- + Denormalized data + """ + assert data.shape[-1] == len(self.target_idx), "incorrect number of target channels" + for i, ch in enumerate(self.target_idx): + data[..., i] = (data[..., i] * self.stdev[ch]) + self.mean[ch] + + return data + + +class DataReaderTimestep(DataReaderBase): + """ + An abstract class for data readers that provide data at fixed time intervals. + + On top of all the fields to be defined in DataReaderBase, they must define the following fields: + + """ + + # The start time of the dataset. + data_start_time: NPDT64 + # The end time of the dataset (possibly none). + data_end_time: NPDT64 | None = None + # The period of the dataset, i.e. the time interval between two consecutive samples. + # It is also called 'frequency' in Anemoi. 
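+    # (Worked example, illustrative: for an hourly dataset starting
+    #  2020-01-01T00:00, period = np.timedelta64(1, "h"); assuming the time
+    #  window handler starts at the same time, a 6 h window at index 0 maps
+    #  to dataset indexes 0..5 via _get_dataset_idxs.)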
+    period: NPTDel64
+
+    def __init__(
+        self,
+        tw_handler: TimeWindowHandler,
+        stream_info: dict,
+        data_start_time: NPDT64 | None = None,
+        data_end_time: NPDT64 | None = None,
+        period: NPTDel64 | None = None,
+    ) -> None:
+        """
+        Parameters
+        ----------
+        tw_handler :
+            time window handler
+        stream_info :
+            information about stream
+        data_start_time :
+            start time of dataset
+        data_end_time :
+            end time of dataset
+        period :
+            period / frequency of dataset
+
+        Returns
+        -------
+        None
+        """
+
+        super().__init__(tw_handler, stream_info)
+        self.data_start_time = data_start_time or tw_handler.t_start
+        self.data_end_time = data_end_time
+        self.period = period
+
+    def _get_dataset_idxs(self, idx: TIndex) -> tuple[NDArray[np.int64], DTRange]:
+        """
+        Get dataset indexes for a given time window index.
+
+        Parameters
+        ----------
+        idx : TIndex
+            Index of the time window.
+
+        Returns
+        -------
+        tuple[NDArray[np.int64], DTRange]
+            Array of dataset indexes corresponding to the time window, and the window itself.
+        """
+        return get_dataset_indexes_timestep(
+            self.data_start_time,
+            self.data_end_time,
+            self.period,
+            idx,
+            self.time_window_handler,
+        )
+
+
+# to avoid rounding issues
+# The basic time precision is 1 millisecond.
+# This should support all datasets (the smallest expected period is 1 second)
+t_epsilon = np.timedelta64(1, "ms")
+
+
+def get_dataset_indexes_timestep(
+    data_start_time: NPDT64,
+    data_end_time: NPDT64 | None,
+    period: NPTDel64,
+    idx: TIndex,
+    tw_handler: TimeWindowHandler,
+) -> tuple[NDArray[np.int64], DTRange]:
+    """
+    Get dataset indexes for a given time window index, when the dataset is periodic.
+
+    Keeping this function separate for testing purposes.
+
+    Parameters
+    ----------
+    data_start_time : NPDT64
+        Start time of the dataset.
+    data_end_time : NPDT64
+        End time of the dataset (possibly None).
+    period : NPTDel64
+        Period / frequency of the dataset.
+    idx : TIndex
+        Index of the time window.
+    tw_handler : TimeWindowHandler
+        Handler for time windows.
+
+    Returns
+    -------
+    tuple[NDArray[np.int64], DTRange]
+        Array of dataset indexes corresponding to the time window, and the window itself.
+    """
+
+    # Function is separated from the class to allow testing without instantiating the class.
+    dtr = tw_handler.window(idx)
+    # Return empty index ranges unless the window lies fully inside the dataset
+    # (the conditions below subsume the partial-overlap cases).
+    if (
+        not data_start_time
+        or not data_end_time
+        or dtr.start < data_start_time
+        or dtr.end > data_end_time
+    ):
+        return (np.array([], dtype=np.int64), dtr)
+
+    # relative time in dataset
+    delta_t_start = dtr.start - data_start_time
+    delta_t_end = dtr.end - data_start_time - t_epsilon
+    assert isinstance(delta_t_start, np.timedelta64), "delta_t_start must be timedelta64"
+    start_didx = delta_t_start // period
+    end_didx = delta_t_end // period
+
+    # adjust start_idx if not exactly on start time
+    if (delta_t_start % period) > np.timedelta64(0, "s"):
+        # empty window in between two timesteps
+        if start_didx == end_didx:
+            return (np.array([], dtype=np.int64), dtr)
+        start_didx += 1
+
+    end_didx = start_didx + int((dtr.end - dtr.start - t_epsilon) / period)
+
+    return (np.arange(start_didx, end_didx + 1, dtype=np.int64), dtr)
diff --git a/src/weathergen/datasets/data_reader_fesom.py b/src/weathergen/datasets/data_reader_fesom.py
new file mode 100644
index 000000000..16971322f
--- /dev/null
+++ b/src/weathergen/datasets/data_reader_fesom.py
@@ -0,0 +1,648 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +import glob +import logging +from pathlib import Path +from typing import override + +import dask +import dask.array as da +import numpy as np +import zarr + +from weathergen.datasets.data_reader_base import ( + DataReaderTimestep, + DTRange, + NDArray, + ReaderData, + TimeWindowHandler, + TIndex, + t_epsilon, +) + +_logger = logging.getLogger(__name__) + + +class DataReaderFesom(DataReaderTimestep): + """ + A dataset class for handling temporal windows of FESOM model output data stored in Zarr format. + + This class is optimized for use with multiple dataloader workers by implementing + lazy initialization of file handles and efficient, batched data reads. + """ + + def __init__( + self, + tw_handler: TimeWindowHandler, + filename: Path, + stream_info: dict, + ) -> None: + # Store configuration but DO NOT open files here + self.filenames = sorted(glob.glob(str(filename) + "/*")) + self._tw_handler = tw_handler + self._stream_info = stream_info + self.target_files = self.filenames + + self._src_lat_conv = False + self._src_lon_conv = False + self._trg_lat_conv = False + self._trg_lon_conv = False + + if "target_file" in stream_info: + self.target_files = sorted(glob.glob(str(stream_info["target_file"]) + "/*")) + + if len(self.filenames) == 0: + self.init_empty() + self._initialized = True + return + + # Initialize data-dependent attributes to None. They will be set by _lazy_init. + self.source_time: da.Array | None = None + self.source_data: da.Array | None = None + self.target_time: da.Array | None = None + self.target_data: da.Array | None = None + self.len = 0 # Default length is 0 until initialized + self.source_channels = [] + self.source_idx = [] + self.target_channels = [] + self.target_idx = [] + self.geoinfo_channels = [] + self.geoinfo_idx = [] + self.properties = {} + self.fake_specs = {} + self.fake_target = False + + if len(self.filenames) == 0 or len(self.target_files) == 0: + name = stream_info["name"] + _logger.warning( + f"{name} couldn't find any files matching {filename}. Stream is skipped." + ) + super().__init__(tw_handler, stream_info) + self.init_empty() + # No need to return, the length is 0, so it will be skipped. + + # We call super() last, after we know if the stream is valid or not. + # We also pass dummy values, as the real ones will be set in _lazy_init. + super().__init__(self._tw_handler, self._stream_info) + + # This flag ensures initialization happens only once per worker + self._initialized = False + # print(f"checking stream info {list(stream_info.keys())}") + + def _get_mesh_size(self, group: zarr.Group) -> int: + if "n_points" in group.data.attrs: + return group.data.attrs["n_points"] + else: + return group.data.attrs["nod2"] + + def _reorder_groups(self, colnames: list[str], groups: list[zarr.Group]) -> list[da.Array]: + reordered_data_arrays: list[da.Array] = [] + + for group in groups: + local_colnames = group["data"].attrs["colnames"] + + # If the order is already correct, no need to do anything. + if local_colnames == colnames: + reordered_data_arrays.append(da.from_zarr(group["data"])) + else: + # Create the list of indices to re-shuffle the columns. 
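+                # Illustrative example (hypothetical names): if the canonical order is
+                # colnames = ["lat", "lon", "temp"] and this file stores
+                # ["lon", "temp", "lat"], then reorder_indices = [2, 0, 1] and the
+                # column selection below restores the canonical order lazily.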
+ reorder_indices = [local_colnames.index(name) for name in colnames] + + # Lazily re-index the dask array. This operation is not executed immediately. + dask_array = da.from_zarr(group["data"]) + reordered_array = dask_array[:, reorder_indices] + reordered_data_arrays.append(reordered_array) + + return reordered_data_arrays + + def _remove_lonlat(self, colnames: list[str]) -> list[str]: + temp_colnames = list(colnames) + temp_colnames.remove("lat") + temp_colnames.remove("lon") + return temp_colnames + + def _lazy_init(self) -> None: + """ + Initializes the dataset object. This method is called once per worker process + to ensure dask scheduler is not shared between them. + """ + if self._initialized: + return + + _logger.info(f"Initialising {self._stream_info['name']}") + + # Each worker now opens its own file handles safely + s_groups: list[zarr.Group] = [zarr.open_group(name, mode="r") for name in self.filenames] + t_groups: list[zarr.Group] = [zarr.open_group(name, mode="r") for name in self.target_files] + + s_times: list[zarr.Array] = [group["dates"] for group in s_groups] + t_times: list[zarr.Array] = [group["dates"] for group in t_groups] + + self.source_time = da.concatenate(s_times, axis=0) + self.target_time = da.concatenate(t_times, axis=0) + + # Use the first group for metadata + self.source_mesh_size = self._get_mesh_size(s_groups[0]) + self.target_mesh_size = self._get_mesh_size(t_groups[0]) + + # Metadata reading is cheap, but let's do it with the rest of the init + self.start_source = self.source_time[0][0].compute() + self.end_source = self.source_time[-1][0].compute() + + if self.start_source > self._tw_handler.t_end or self.end_source < self._tw_handler.t_start: + name = self._stream_info["name"] + _logger.warning(f"{name} is not supported over data loader window. Stream is skipped.") + self.init_empty() + self._initialized = True + return + + self.start_target = self.target_time[0][0].compute() + self.end_target = self.target_time[-1][0].compute() + + if self.start_target > self._tw_handler.t_end or self.end_target < self._tw_handler.t_start: + name = self._stream_info["name"] + _logger.warning(f"{name} is not supported over data loader window. 
Stream is skipped.") + self.init_empty() + self._initialized = True + return + + self.source_period = ( + self.source_time[self.source_mesh_size][0] - self.source_time[0][0] + ).compute() + self.target_period = ( + self.target_time[self.target_mesh_size][0] - self.target_time[0][0] + ).compute() + + # Re-initialize the parent class with correct time info + super().__init__( # Initialise only for source as source-target split is not supported + self._tw_handler, + self._stream_info, + self.start_source, + self.end_source, + self.source_period, + ) + + if ( + self._tw_handler.t_start > self.start_source + and self._tw_handler.t_start > self.end_source + ): + self.source_start_idx = ( + (self._tw_handler.t_start - self.start_source) // self.source_period + 1 + ) * self.source_mesh_size + else: + self.source_start_idx = 0 + + if ( + self._tw_handler.t_start > self.start_target + and self._tw_handler.t_start > self.end_target + ): + self.target_start_idx = ( + (self._tw_handler.t_start - self.start_target) // self.target_period + 1 + ) * self.target_mesh_size + else: + self.target_start_idx = 0 + + self.source_end_idx = ( + (self._tw_handler.t_end - self.start_source) // self.source_period + 1 + ) * self.source_mesh_size + self.target_end_idx = ( + (self._tw_handler.t_end - self.start_target) // self.target_period + 1 + ) * self.target_mesh_size + + if self.source_end_idx > len(self.source_time): + self.source_end_idx = len(self.source_time) + if self.target_end_idx > len(self.target_time): + self.target_end_idx = len(self.target_time) + + self.source_len = (self.source_end_idx - self.source_start_idx) // self.source_mesh_size + self.target_len = (self.target_end_idx - self.target_start_idx) // self.target_mesh_size + self.len = min(self.source_len, self.target_len) + + # Check for a valid length after calculations + if self.len <= 0: + self.init_empty() + self._initialized = True + return + + self.source_colnames: list[str] = list(s_groups[0].data.attrs["colnames"]) + self.target_colnames: list[str] = list(t_groups[0].data.attrs["colnames"]) + + self.source_cols_idx = list(np.arange(len(self.source_colnames), dtype=int)) + self.target_cols_idx = list(np.arange(len(self.target_colnames), dtype=int)) + + self.src_lat_index: int = self.source_colnames.index("lat") + self.src_lon_index: int = self.source_colnames.index("lon") + self.trg_lat_index: int = self.target_colnames.index("lat") + self.trg_lon_index: int = self.target_colnames.index("lon") + + source_reorderd = self._reorder_groups(self.source_colnames, s_groups) + target_reorderd = self._reorder_groups(self.target_colnames, t_groups) + + # Modify a copy, not the original list while iterating + self.source_colnames = self._remove_lonlat(self.source_colnames) + self.target_colnames = self._remove_lonlat(self.target_colnames) + + self.source_cols_idx.remove(self.src_lat_index) + self.source_cols_idx.remove(self.src_lon_index) + self.source_cols_idx = np.array(self.source_cols_idx) + + self.target_cols_idx.remove(self.trg_lat_index) + self.target_cols_idx.remove(self.trg_lon_index) + self.target_cols_idx = np.array(self.target_cols_idx) + + self.properties = {"stream_id": s_groups[0].data.attrs["obs_id"]} + + self.source_mean = np.concatenate( + (np.array([0, 0]), np.array(s_groups[0].data.attrs["means"])) + ) + self.source_stdev = np.sqrt( + np.concatenate((np.array([1, 1]), np.array(s_groups[0].data.attrs["std"]))) + ) + self.source_stdev[self.source_stdev <= 1e-5] = 1.0 + + self.target_mean = np.concatenate( + (np.array([0, 0]), 
np.array(t_groups[0].data.attrs["means"])) + ) + self.target_stdev = np.sqrt( + np.concatenate((np.array([1, 1]), np.array(t_groups[0].data.attrs["std"]))) + ) + self.target_stdev[self.target_stdev <= 1e-5] = 1.0 + + self.source = da.concatenate(source_reorderd, axis=0) + self.target = da.concatenate(target_reorderd, axis=0) + + source_channels = self._stream_info.get("source") + source_excl = self._stream_info.get("source_exclude") + self.source_channels, self.source_idx = ( + self.select(self.source_colnames, self.source_cols_idx, source_channels, source_excl) + if source_channels or source_excl + else (self.source_colnames, self.source_cols_idx) + ) + + target_channels = self._stream_info.get("target") + target_excl = self._stream_info.get("target_exclude") + self.target_channels, self.target_idx = ( + self.select(self.target_colnames, self.target_cols_idx, target_channels, target_excl) + if target_channels or target_excl + else (self.target_colnames, self.target_cols_idx) + ) + + src_timestep_lats = self.source[: self.source_mesh_size, self.src_lat_index].compute() + trg_timestep_lats = self.target[: self.target_mesh_size, self.trg_lat_index].compute() + + if np.any(src_timestep_lats > 90.0): + _logger.warning( + f"Latitude for stream '{self._stream_info['name']}' " + f"source appears to be in a [0, 180] format. " + f"It will be automatically converted to the required [-90, 90] format." + ) + self._src_lat_conv = True + + if np.any(trg_timestep_lats > 90.0): + _logger.warning( + f"Latitude for stream '{self._stream_info['name']}' " + f"target appears to be in a [0, 180] format. " + f"It will be automatically converted to the required [-90, 90] format." + ) + self._trg_lat_conv = True + + src_timestep_lons = self.source[: self.source_mesh_size, self.src_lon_index].compute() + trg_timestep_lons = self.target[: self.target_mesh_size, self.trg_lon_index].compute() + + if np.any(src_timestep_lons > 180.0): + _logger.warning( + f"Longitude for stream '{self._stream_info['name']}' " + f"source appears to be in a [0, 360] format. " + f"It will be automatically converted to the required [-180, 180] format." + ) + self._src_lon_conv = True + + if np.any(trg_timestep_lons > 180.0): + _logger.warning( + f"Longitude for stream '{self._stream_info['name']}' " + f"target appears to be in a [0, 360] format." + f"It will be automatically converted to the required [-180, 180] format." 
+            )
+            self._trg_lon_conv = True
+
+        self.geoinfo_channels = []
+        self.geoinfo_idx = []
+
+        self._initialized = True
+
+    def select(
+        self,
+        colnames: list[str],
+        cols_idx: NDArray,
+        ch_filters: list[str] | None,
+        excl: list[str] | None = None,
+    ) -> tuple[list[str], NDArray]:
+        if excl and ch_filters:
+            mask = [
+                any(f == c for f in ch_filters) and all(ex not in c for ex in excl)
+                for c in colnames
+            ]
+        elif ch_filters:
+            mask = [any(f == c for f in ch_filters) for c in colnames]
+        elif excl:
+            mask = [all(ex not in c for ex in excl) for c in colnames]
+        else:
+            assert False, "Cannot use select with both ch_filters and excl as None"
+
+        selected_cols_idx = cols_idx[np.where(mask)[0]]
+        selected_colnames = [colnames[i] for i in np.where(mask)[0]]
+        return selected_colnames, selected_cols_idx
+
+    @override
+    def init_empty(self) -> None:
+        super().init_empty()
+        self.len = 0
+
+    @override
+    def length(self) -> int:
+        # Make sure initialization has happened before returning length
+        self._lazy_init()
+        return self.len
+
+    def _get_source_idxs(self, idx: TIndex) -> tuple[NDArray, DTRange]:
+        """
+        Get source dataset indexes for a given time window index, when the dataset is periodic.
+
+        This function assumes the state of a variable is persistent: if no data falls
+        within the time window, the last measurement before the beginning of the window
+        is used.
+
+        Parameters
+        ----------
+        idx : TIndex
+            Index of the time window.
+
+        Returns
+        -------
+        tuple[NDArray[np.int64], DTRange]
+            Array of dataset indexes corresponding to the time window, and the window itself.
+        """
+        tw_handler = self.time_window_handler
+
+        # Mirrors get_dataset_indexes_timestep, but falls back to the preceding
+        # timestep (persistence) when the window falls between two timesteps.
+        dtr = tw_handler.window(idx)
+        # Return empty index ranges unless the window lies fully inside the dataset
+        if (
+            not self.start_source
+            or not self.end_source
+            or dtr.start < self.start_source
+            or dtr.end > self.end_source
+        ):
+            return (np.array([], dtype=np.int64), dtr)
+
+        # relative time in dataset
+        delta_t_start = dtr.start - self.start_source
+        delta_t_end = dtr.end - self.start_source - t_epsilon
+        assert isinstance(delta_t_start, np.timedelta64), "delta_t_start must be timedelta64"
+        start_didx = delta_t_start // self.source_period
+        end_didx = delta_t_end // self.source_period
+
+        # adjust start_idx if not exactly on start time
+        if (delta_t_start % self.source_period) > np.timedelta64(0, "s"):
+            # empty window in between two timesteps: fall back to the last timestep
+            # before the window (persistence)
+            if start_didx == end_didx:
+                return (np.array([start_didx], dtype=np.int64), dtr)
+            start_didx += 1
+
+        end_didx = start_didx + int((dtr.end - dtr.start - t_epsilon) / self.source_period)
+        return (np.arange(start_didx, end_didx + 1, dtype=np.int64), dtr)
+
+    def _get_target_idxs(self, idx: TIndex) -> tuple[NDArray, DTRange]:
+        """
+        Get target dataset indexes for a given time window index, when the dataset is periodic.
+
+        This function assumes the state of a variable is persistent: if no data falls
+        within the time window, the last measurement before the beginning of the window
+        is used.
+
+        Parameters
+        ----------
+        idx : TIndex
+            Index of the time window.
+
+        Returns
+        -------
+        tuple[NDArray[np.int64], DTRange]
+            Array of dataset indexes corresponding to the time window, and the window itself.
+        """
+        tw_handler = self.time_window_handler
+
+        # Mirrors get_dataset_indexes_timestep, but falls back to the preceding
+        # timestep (persistence) when the window falls between two timesteps.
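+        # Worked example (hypothetical numbers): with start_target = 2000-01-01T00,
+        # target_period = 6 h and a window [2000-01-02T03, 2000-01-02T15), the code
+        # below yields start_didx = 4, then 5 after the alignment step (the first
+        # timestep inside the window is 06:00), and end_didx = 6 (12:00), i.e.
+        # dataset indexes [5, 6].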
+        dtr = tw_handler.window(idx)
+        # Return empty index ranges unless the window lies fully inside the dataset
+        if (
+            not self.start_target
+            or not self.end_target
+            or dtr.start < self.start_target
+            or dtr.end > self.end_target
+        ):
+            return (np.array([], dtype=np.int64), dtr)
+
+        # relative time in dataset
+        delta_t_start = dtr.start - self.start_target
+        delta_t_end = dtr.end - self.start_target - t_epsilon
+        assert isinstance(delta_t_start, np.timedelta64), "delta_t_start must be timedelta64"
+        start_didx = delta_t_start // self.target_period
+        end_didx = delta_t_end // self.target_period
+
+        # adjust start_idx if not exactly on start time
+        if (delta_t_start % self.target_period) > np.timedelta64(0, "s"):
+            # empty window in between two timesteps: fall back to the last timestep
+            # before the window (persistence)
+            if start_didx == end_didx:
+                return (np.array([start_didx], dtype=np.int64), dtr)
+            start_didx += 1
+
+        end_didx = start_didx + int((dtr.end - dtr.start - t_epsilon) / self.target_period)
+        return (np.arange(start_didx, end_didx + 1, dtype=np.int64), dtr)
+
+    @override
+    def get_source(self, idx: TIndex) -> ReaderData:
+        self._lazy_init()
+        (t_idxs, dtr) = self._get_source_idxs(idx)
+        if self.len == 0 or len(t_idxs) == 0:
+            return ReaderData.empty(
+                num_data_fields=len(self.source_idx), num_geo_fields=len(self.geoinfo_idx)
+            )
+
+        start_row = t_idxs[0] * self.source_mesh_size
+        end_row = (t_idxs[-1] + 1) * self.source_mesh_size
+
+        # Note: we read all columns from start_row to end_row once,
+        # then select the ones we need. This is more efficient for Zarr.
+        full_data_slice = self.source[start_row:end_row]
+        datetimes_lazy = self.source_time[start_row:end_row]
+
+        # Define the specific slices we need from the larger block
+        data_lazy = full_data_slice[:, self.source_idx]
+        lat_lazy = full_data_slice[:, self.src_lat_index]
+        lon_lazy = full_data_slice[:, self.src_lon_index]
+
+        # Dask optimizes this to a single (or few) efficient read operation(s).
+        data, lat, lon, datetimes = dask.compute(
+            data_lazy, lat_lazy, lon_lazy, datetimes_lazy, scheduler="single-threaded"
+        )
+
+        if self._src_lat_conv:
+            lat = 90.0 - lat
+
+        if self._src_lon_conv:
+            lon = ((lon + 180.0) % 360.0) - 180.0
+
+        coords = np.stack([lat, lon], axis=1)
+        geoinfos = np.zeros((data.shape[0], 0), dtype=data.dtype)
+        datetimes = np.squeeze(datetimes)
+
+        rd = ReaderData(
+            coords=coords,
+            geoinfos=geoinfos,
+            data=data,
+            datetimes=datetimes,
+        )
+
+        return rd
+
+    @override
+    def get_target(self, idx: TIndex) -> ReaderData:
+        self._lazy_init()
+        (t_idxs, dtr) = self._get_target_idxs(idx)
+        if self.len == 0 or len(t_idxs) == 0:
+            return ReaderData.empty(
+                num_data_fields=len(self.target_idx), num_geo_fields=len(self.geoinfo_idx)
+            )
+
+        start_row = t_idxs[0] * self.target_mesh_size
+        end_row = (t_idxs[-1] + 1) * self.target_mesh_size
+
+        # Note: we read all columns from start_row to end_row once,
+        # then select the ones we need. This is more efficient for Zarr.
+        full_data_slice = self.target[start_row:end_row]
+        datetimes_lazy = self.target_time[start_row:end_row]
+
+        # Define the specific slices we need from the larger block
+        data_lazy = full_data_slice[:, self.target_idx]
+        lat_lazy = full_data_slice[:, self.trg_lat_index]
+        lon_lazy = full_data_slice[:, self.trg_lon_index]
+
+        # Dask optimizes this to a single (or few) efficient read operation(s).
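+        # For a mesh of N points and k selected timesteps, the arrays computed below
+        # have k*N rows: data (k*N, n_target_channels), lat/lon (k*N,). The
+        # single-threaded scheduler avoids spawning thread pools inside dataloader
+        # worker processes.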
+ data, lat, lon, datetimes = dask.compute( + data_lazy, lat_lazy, lon_lazy, datetimes_lazy, scheduler="single-threaded" + ) + + if self._trg_lat_conv: + lat = 90.0 - lat + + if self._trg_lon_conv: + lon = ((lon + 180.0) % 360.0) - 180.0 + + coords = np.stack([lat, lon], axis=1) + geoinfos = np.zeros((data.shape[0], 0), dtype=data.dtype) + datetimes = np.squeeze(datetimes) + + rd = ReaderData( + coords=coords, + geoinfos=geoinfos, + data=data, + datetimes=datetimes, + ) + + return rd + + @override + def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: + return self.get_source(idx) + + @override + def normalize_source_channels(self, source: NDArray) -> NDArray: + """ + Normalize source channels + + Parameters + ---------- + data : + data to be normalized + + Returns + ------- + Normalized data + """ + assert source.shape[-1] == len(self.source_idx), "incorrect number of source channels" + for i, ch in enumerate(self.source_idx): + source[..., i] = (source[..., i] - self.source_mean[ch]) / self.source_stdev[ch] + + return source + + @override + def normalize_target_channels(self, target: NDArray) -> NDArray: + """ + Normalize target channels + + Parameters + ---------- + data : + data to be normalized + + Returns + ------- + Normalized data + """ + assert target.shape[-1] == len(self.target_idx), "incorrect number of target channels" + for i, ch in enumerate(self.target_idx): + target[..., i] = (target[..., i] - self.target_mean[ch]) / self.target_stdev[ch] + + return target + + @override + def denormalize_source_channels(self, source: NDArray) -> NDArray: + """ + Denormalize source channels + + Parameters + ---------- + data : + data to be denormalized + + Returns + ------- + Denormalized data + """ + assert source.shape[-1] == len(self.source_idx), "incorrect number of source channels" + for i, ch in enumerate(self.source_idx): + source[..., i] = (source[..., i] * self.source_stdev[ch]) + self.source_mean[ch] + + return source + + @override + def denormalize_target_channels(self, data: NDArray) -> NDArray: + """ + Denormalize target channels + + Parameters + ---------- + data : + data to be denormalized (target or pred) + + Returns + ------- + Denormalized data + """ + assert data.shape[-1] == len(self.target_idx), "incorrect number of target channels" + for i, ch in enumerate(self.target_idx): + data[..., i] = (data[..., i] * self.target_stdev[ch]) + self.target_mean[ch] + + return data diff --git a/src/weathergen/datasets/data_reader_obs.py b/src/weathergen/datasets/data_reader_obs.py new file mode 100644 index 000000000..5fb2b7147 --- /dev/null +++ b/src/weathergen/datasets/data_reader_obs.py @@ -0,0 +1,257 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
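+
+# Sketch of the on-disk layout this reader assumes (illustrative summary, inferred
+# from the reader code below, not a formal spec): the zarr store contains "data"
+# (rows x columns, with colnames covering "lat", "lon", geoinfo columns and the
+# "obsvalue_*" data channels), "dates" with one datetime per row, and an hourly
+# index "idx_<YYYYMMDDHHMM>_1" whose h-th entry is the first data row belonging
+# to hour h after the base datetime.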
+
+import datetime
+import logging
+from pathlib import Path
+from typing import override
+
+import numpy as np
+import zarr
+
+from weathergen.datasets.data_reader_base import (
+    DataReaderBase,
+    ReaderData,
+    TimeWindowHandler,
+    check_reader_data,
+)
+
+_logger = logging.getLogger(__name__)
+
+
+class DataReaderObs(DataReaderBase):
+    def __init__(self, tw_handler: TimeWindowHandler, filename: Path, stream_info: dict) -> None:
+        super().__init__(tw_handler, stream_info)
+
+        self.filename = filename
+        self.z = zarr.open(filename, mode="r")
+        self.data = self.z["data"]
+        self.dt = self.z["dates"]  # datetime only
+        self.base_datetime = stream_info.get("base_datetime", "1970-01-01T00:00:00")
+        format_str = "%Y-%m-%dT%H:%M:%S"
+        self.base_datetime = datetime.datetime.strptime(str(self.base_datetime), format_str)
+        # To read idx convert to a string, format e.g.: 197001010000
+        base_date_str = self.base_datetime.strftime("%Y%m%d%H%M")
+        self.hrly_index = self.z[f"idx_{base_date_str}_1"]
+        self.colnames = self.data.attrs["colnames"]
+
+        data_colnames = [col for col in self.colnames if "obsvalue" in col]
+        data_idx = [i for i, col in enumerate(self.colnames) if "obsvalue" in col]
+
+        # determine source / target channels and corresponding idx using include and exclude lists
+        s_chs = stream_info.get("source")
+        s_chs_exclude = stream_info.get("source_exclude", [])
+
+        t_chs = stream_info.get("target")
+        t_chs_exclude = stream_info.get("target_exclude", [])
+
+        self.source_channels = self.select_channels(data_colnames, s_chs, s_chs_exclude)
+        self.source_idx = [self.colnames.index(c) for c in self.source_channels]
+        self.source_idx = np.array(self.source_idx, dtype=np.int64)
+
+        self.target_channels = self.select_channels(data_colnames, t_chs, t_chs_exclude)
+        self.target_idx = [self.colnames.index(c) for c in self.target_channels]
+        self.target_idx = np.array(self.target_idx, dtype=np.int64)
+
+        # determine idx for coords and geoinfos
+        self.coords_idx = [self.colnames.index("lat"), self.colnames.index("lon")]
+        self.geoinfo_idx = list(range(self.coords_idx[-1] + 1, data_idx[0]))
+        self.geoinfo_channels = [self.colnames[i] for i in self.geoinfo_idx]
+
+        # load additional properties (mean, var)
+        self._load_properties()
+        self.mean = np.array(self.properties["means"])
+        self.stdev = np.sqrt(np.array(self.properties["vars"]))
+        self.mean_geoinfo = np.array(self.properties["means"])[self.geoinfo_idx]
+        self.stdev_geoinfo = np.sqrt(np.array(self.properties["vars"])[self.geoinfo_idx])
+
+        # Create index for samples
+        self._setup_sample_index()
+
+        self.len = min(len(self.indices_start), len(self.indices_end))
+
+    @override
+    def length(self) -> int:
+        return self.len
+
+    def select_channels(
+        self, colnames: list[str], cols_select: list[str] | None, cols_exclude: list[str] | None
+    ) -> list[str]:
+        """
+        Allow the user to specify which columns they want to access.
+        Get functions only return data for these specified columns.
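+
+        E.g. (illustrative names): with a column "obsvalue_airTemperature_2m",
+        cols_select=["airTemperature"] keeps it via substring matching, while
+        cols_exclude=["_2m"] would drop it again; both filters are applied.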
+ """ + selected_colnames = [ + c + for c in colnames + if ( + np.array([c_sel in c for c_sel in cols_select]).any() + if cols_select is not None + else True and not np.array([c_nsel in c for c_nsel in cols_exclude]).any() + ) + ] + + return selected_colnames + + def first_sample_with_data(self) -> int: + """ + Returns the position of the first sample which contains data. + """ + return ( + int(np.nonzero(self.indices_end)[0][0]) + if self.indices_end[-1] != self.indices_end[0] + else None + ) + + def last_sample_with_data(self) -> int: + """ + Returns the position of the last sample which contains data. + """ + if self.indices_end[-1] == self.indices_end[0]: + last_sample = None + else: + last_sample = int( + np.where(np.diff(np.append(self.indices_end, self.indices_end[-1])) > 0)[0][-1] + 1 + ) + + return last_sample + + def _setup_sample_index(self) -> None: + """ + Dataset is divided into samples; + - each n_hours long + - sample 0 starts at start (yyyymmddhhmm) + - index array has one entry for each sample; contains the index of the first row + containing data for that sample + """ + + # TODO: generalize this + assert self.time_window_handler.t_window_len.item().total_seconds() % 3600 == 0, ( + "t_window_len has to be full hour (currently {self.time_window_handler.t_window_len})" + ) + len_hrs = int(self.time_window_handler.t_window_len.item().total_seconds()) // 3600 + assert self.time_window_handler.t_window_step.item().total_seconds() % 3600 == 0, ( + "t_window_step has to be full hour (currently {self.time_window_handler.t_window_len})" + ) + step_hrs = int(self.time_window_handler.t_window_step.item().total_seconds()) // 3600 + + self.start_dt = self.time_window_handler.t_start.item() + self.end_dt = self.time_window_handler.t_end.item() + + ## Calculate the number of hours between start of hourly base index + # and the requested sample index + diff_in_hours_start = int((self.start_dt - self.base_datetime).total_seconds() / 3600) + diff_in_hours_end = int((self.end_dt - self.base_datetime).total_seconds() / 3600) + + end_range_1 = min(diff_in_hours_end, self.hrly_index.shape[0] - 1) + self.indices_start = self.hrly_index[diff_in_hours_start:end_range_1:step_hrs] + + end_range_2 = min( + diff_in_hours_end + len_hrs, self.hrly_index.shape[0] - 1 + ) # handle beyond end of data range safely + self.indices_end = ( + self.hrly_index[diff_in_hours_start + len_hrs : end_range_2 : step_hrs] - 1 + ) + ## Handle situations where the requested dataset span + # goes beyond the hourly index stored in the zarr + if diff_in_hours_end > (self.hrly_index.shape[0] - 1): + if diff_in_hours_start > (self.hrly_index.shape[0] - 1): + n = (diff_in_hours_end - diff_in_hours_start) // step_hrs + self.indices_start = np.zeros(n, dtype=int) + self.indices_end = np.zeros(n, dtype=int) + else: + self.indices_start = np.append( + self.indices_start, + np.ones( + (diff_in_hours_end - self.hrly_index.shape[0] - 1) // step_hrs, dtype=int + ) + * self.indices_start[-1], + ) + + self.indices_end = np.append( + self.indices_end, + np.ones( + # add (len_hrs + 1) since above we also have diff_in_hours_start + len_hrs + (diff_in_hours_end - self.hrly_index.shape[0] + (len_hrs + 1)) // step_hrs, + dtype=int, + ) + * self.indices_end[-1], + ) + + # Prevent -1 in samples before we have data + self.indices_end = np.maximum(self.indices_end, 0) + + # If end (yyyymmddhhmm) is not a multiple of len_hrs + # truncate the last sample so that it doesn't go beyond the requested dataset end date + self.indices_end = 
np.minimum(self.indices_end, self.hrly_index[end_range_1]) + + def _load_properties(self) -> None: + self.properties = {} + + self.properties["means"] = self.data.attrs["means"] + self.properties["vars"] = self.data.attrs["vars"] + + @override + def _get(self, idx: int, channels_idx: list[int]) -> ReaderData: + """ + Get data for window + + Parameters + ---------- + idx : int + Index of temporal window + channels_idx : np.array + Selection of channels + + Returns + ------- + ReaderDatas (coords, geoinfos, data, datetimes) + """ + + if len(channels_idx) == 0: + return ReaderData.empty( + num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) + ) + + start_row = self.indices_start[idx - 1] + end_row = self.indices_end[idx] + + coords = self.data.oindex[start_row:end_row, self.coords_idx] + geoinfos = ( + self.data.oindex[start_row:end_row, self.geoinfo_idx] + if len(self.geoinfo_idx) > 0 + else np.zeros((coords.shape[0], 0), np.float32) + ) + + data = self.data.oindex[start_row:end_row, channels_idx] + datetimes = self.dt[start_row:end_row][:, 0] + + # indices_start, indices_end above work with [t_start, t_end] and violate + # our convention [t_start, t_end) where endpoint is excluded + # compute mask to enforce it + t_win = self.time_window_handler.window(idx) + t_mask = np.logical_and(datetimes >= t_win.start, datetimes < t_win.end) + + rdata = ReaderData( + coords=coords[t_mask], + geoinfos=geoinfos[t_mask], + data=data[t_mask], + datetimes=datetimes[t_mask], + ) + + dtr = self.time_window_handler.window(idx) + check_reader_data(rdata, dtr) + + return rdata diff --git a/src/weathergen/datasets/masking.py b/src/weathergen/datasets/masking.py new file mode 100644 index 000000000..fbcf10f3a --- /dev/null +++ b/src/weathergen/datasets/masking.py @@ -0,0 +1,524 @@ +import logging + +import numpy as np +import torch + +from weathergen.common.config import Config + +_logger = logging.getLogger(__name__) + + +class Masker: + """Class to generate masks for token sequences and apply them. + This class supports different masking strategies and combinations. + + Attributes: + masking_rate (float): The base rate at which tokens are masked. + masking_strategy (str): The strategy used for masking (e.g., "random", + "block", "healpix", "channel"). + current_strategy (str): The current strategy in use, relevant + when using "combination" strategy. + "random" - random masking of tokens at the level of the data + "block" - masking out large blocks of tokens in 1D, without spatial meaning + "healpix" - masking at the level of HEALPix cells, where all child cells + of a parent cell at a specific HEALpix level are masked + if the parent is masked. + The healpix level must be configured with hl_mask. + e.g. masking_strategy_config = {"hl_mask": 1} + with hl_mask the level for masking that we want to apply + e.g. level 1 very large cells masked + "channel" - masking data channels, where channels of the data are masked + can be done per-cell (each cell has different channels masked) + or globally (all have the same channels masked). + e.g. masking_strategy_config = {"mode": "per_cell"} or + {"mode": "global"} + "causal" - masking the latest timesteps in each token, according to the masking rate. + masking_rate_sampling (bool): Whether to sample the masking rate from a distribution. + masking_strategy_config (dict): Configuration for the masking strategy, can include + additional parameters like "hl_mask", etc. + specific to the masking strategy. See above. 
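+
+    Example (illustrative values, not defaults):
+        masking_rate = 0.6
+        masking_strategy = "combination"
+        masking_strategy_config = {
+            "strategies": ["random", "healpix"],
+            "probabilities": [0.5, 0.5],
+            "hl_mask": 1,
+            "same_strategy_per_batch": True,
+        }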
+ """ + + def __init__(self, cf: Config): + self.masking_rate = cf.masking_rate + self.masking_strategy = cf.masking_strategy + self.current_strategy = cf.masking_strategy # Current strategy in use + self.masking_rate_sampling = cf.masking_rate_sampling + # masking_strategy_config is a dictionary that can hold any additional parameters + self.healpix_level_data = cf.healpix_level + self.masking_strategy_config = cf.get("masking_strategy_config", {}) + + self.mask_value = 0.0 + self.dim_time_enc = 6 + + # number of healpix cells + self.healpix_num_cells = 12 * (4**self.healpix_level_data) + + # Initialize the mask, set to None initially, + # until it is generated in mask_source. + self.perm_sel: list[np.typing.NDArray] = None + + # Per-batch strategy tracking + self.same_strategy_per_batch = self.masking_strategy_config.get( + "same_strategy_per_batch", False + ) + self.batch_strategy_set = False + + # Check for required masking_strategy_config at construction time + if self.current_strategy == "healpix": + hl_data = self.healpix_level_data + hl_mask = self.masking_strategy_config.get("hl_mask") + assert hl_data is not None and hl_mask is not None, ( + "If HEALPix masking, hl_mask must be given in masking_strategy_config." + ) + assert hl_mask < hl_data, "hl_mask must be less than hl_data for HEALPix masking." + + if self.current_strategy == "channel": + # Ensure that masking_strategy_config contains either 'global' or 'per_cell' + assert self.masking_strategy_config.get("mode") in [ + "global", + "per_cell", + ], "masking_strategy_config must contain 'mode' key with value 'global' or 'per_cell'." + + # check all streams that source and target channels are identical + for stream in cf.streams: + # check explicit includes + source_include = stream.get("source_include", []) + target_include = stream.get("target_include", []) + assert set(source_include) == set(target_include), ( + "Source and target channels not identical. Required for masking_mode=channel" + ) + # check excludes + source_exclude = stream.get("source_exclude", []) + target_exclude = stream.get("target_exclude", []) + assert set(source_exclude) == set(target_exclude), ( + "Source and target channels not identical. Required for masking_mode=channel" + ) + + def reset_rng(self, rng) -> None: + """ + Reset rng after mini_epoch to ensure proper randomization + """ + self.rng = rng + + def set_batch_strategy(self): + """ + Set strategy for this batch. + Only relevant with combination and same_strategy_per_batch. + """ + if self.masking_strategy == "combination" and self.same_strategy_per_batch: + self.current_strategy = self.rng.choice( + self.masking_strategy_config["strategies"], + p=self.masking_strategy_config["probabilities"], + ) + self.batch_strategy_set = True + + def reset_batch_strategy(self): + """ + Reset for next batch. + """ + if self.masking_strategy == "combination" and self.same_strategy_per_batch: + self.current_strategy = None + self.batch_strategy_set = False + + def _select_strategy(self): + """ + Select the strategy to use. 
+ """ + if self.masking_strategy == "combination": + if self.same_strategy_per_batch: + assert self.batch_strategy_set, "Must call set_batch_strategy() first" + return self.current_strategy + else: + # Sample new strategy for each stream + return self.rng.choice( + self.masking_strategy_config["strategies"], + p=self.masking_strategy_config["probabilities"], + ) + else: + # Non-combination strategy, return as is + return self.masking_strategy + + def mask_source( + self, + tokenized_data: list[torch.Tensor], + coords: torch.Tensor, + geoinfos: torch.Tensor, + source: torch.Tensor, + ) -> list[torch.Tensor]: + """ + Receives tokenized data, generates a mask, and returns the source data (unmasked) + and the permutation selection mask (perm_sel) to be used for the target. + + Args: + tokenized_data (list[torch.Tensor]): A list of tensors, where each tensor + represents the tokens for a cell. + + Returns: + list[torch.Tensor]: The unmasked tokens (model input). + """ + + token_lens = [len(t) for t in tokenized_data] + num_tokens = sum(token_lens) + + # If there are no tokens, return empty lists. + if num_tokens == 0: + return tokenized_data + + # Clean strategy selection + self.current_strategy = self._select_strategy() + + # Set the masking rate. + rate = self._get_sampling_rate() + + if rate == 0.0: + _logger.warning( + "masking_rate is 0. This will result in empty target. The sample will be skipped. " + + "If this occurs repeatedtly the masking settings likely need to be revised." + ) + + # Handle the special case where all tokens are masked + if rate == 1.0: + token_lens = [len(t) for t in tokenized_data] + self.perm_sel = [np.ones(tl, dtype=bool) for tl in token_lens] + source_data = [data[~p] for data, p in zip(tokenized_data, self.perm_sel, strict=True)] + return source_data + + # Implementation of different masking strategies. + # Generate a flat boolean mask for random, block, or healpix masking at cell level. + # Generate a 3D mask to apply to each cell for channel masking. + + if self.current_strategy == "random": + flat_mask = self.rng.uniform(0, 1, num_tokens) < rate + + elif self.current_strategy == "block": + flat_mask = np.zeros(num_tokens, dtype=bool) + block_size = int(np.round(rate * num_tokens)) + if block_size > 0 and num_tokens > 0: + start_index = self.rng.integers(0, max(1, num_tokens - block_size + 1)) + flat_mask[start_index : start_index + block_size] = True + + elif self.current_strategy == "healpix": + flat_mask = self._generate_healpix_mask(token_lens, rate) + + elif self.current_strategy == "channel": + mask = self._generate_channel_mask(tokenized_data, rate, coords, geoinfos, source) + + elif self.current_strategy == "causal": + mask = self._generate_causal_mask(tokenized_data, rate, coords, geoinfos, source) + + else: + assert False, f"Unknown masking strategy: {self.current_strategy}" + + # apply mask + + # if masking_strategy is channel, we need to handle the masking differently, + # since p is not 1D Boolean for the list of cells, but 3D to mask the channels in each cell. + if self.current_strategy == "channel": + self.perm_sel = mask + # In the source_data we will set the channels that are masked to 0.0. 
+ source_data = [] + for data, p in zip(tokenized_data, self.perm_sel, strict=True): + if len(data) > 0: + data[p] = self.mask_value + source_data.append(data) + else: + source_data.append(data) + + elif self.current_strategy == "causal": + # Only select unmasked timesteps + self.perm_sel = mask + source_data = [] + for data, p in zip(tokenized_data, self.perm_sel, strict=True): + source_data.append(data[~p] if len(data) > 0 else data) + + else: + # Split the flat mask to match the structure of the tokenized data (list of lists) + # This will be perm_sel, as a class attribute, used to mask the target data. + split_indices = np.cumsum(token_lens)[:-1] + self.perm_sel = np.split(flat_mask, split_indices) + + # Apply the mask to get the source data (where mask is False) + source_data = [data[~p] for data, p in zip(tokenized_data, self.perm_sel, strict=True)] + + return source_data + + def mask_target( + self, + target_tokenized_data: list[list[torch.Tensor]], + coords: torch.Tensor, + geoinfos: torch.Tensor, + source: torch.Tensor, + ) -> list[torch.Tensor]: + """ + Applies the permutation selection mask to + the tokenized data to create the target data. + Handles cases where a cell has no target + tokens by returning an empty tensor of the correct shape. + + Args: + target_tokens_cells (list[list[torch.Tensor]]): List of lists of tensors for each cell. + coords (torch.Tensor): Coordinates tensor, used to determine feature dimension. + geoinfos (torch.Tensor): Geoinfos tensor, used to determine feature dimension. + source (torch.Tensor): Source tensor, used to determine feature dimension. + + Returns: + list[torch.Tensor]: The target data with masked tokens, one tensor per cell. + """ + + # check that self.perm_sel is set, and not None with an assert statement + assert self.perm_sel is not None, "Masker.perm_sel must be set before calling mask_target." + + # Pre-calculate the total feature dimension of a token to create + # correctly shaped empty tensors. + + feature_dim = self.dim_time_enc + coords.shape[-1] + geoinfos.shape[-1] + source.shape[-1] + + processed_target_tokens = [] + + # process all tokens used for embedding + for cc, pp in zip(target_tokenized_data, self.perm_sel, strict=True): + if len(cc) == 0: # Skip if there's no target data + pass + + if self.current_strategy == "channel": + # If masking strategy is channel, handle target tokens differently. + # We don't have Booleans per cell, instead per channel per cell, + # we set the unmasked channels to NaN so not in loss. + selected_tensors = [] + for c, p in zip(cc, pp, strict=True): + # slightly complicated as the first dimension of c varies with data in the cell. 
+ # do not mask the first 8 channels, + # and set unmasked channels to nan + c[:, (self.dim_time_enc + coords.shape[-1] + geoinfos.shape[-1]) :][ + :, ~p[0, (self.dim_time_enc + coords.shape[-1] + geoinfos.shape[-1]) :] + ] = torch.nan + selected_tensors.append(c) + + elif self.current_strategy == "causal": + # select only the target times where mask is True + if len(cc) == len(pp): + selected_tensors = [c for i, c in enumerate(cc) if pp[i]] + elif len(pp) == 0: + selected_tensors = cc + else: # If length of target and mask doesn't match, create new mask + ratio = np.sum(pp) / len(pp) # Ratio of masked tokens in source + indx = max(1, int(ratio * len(cc))) # Get the same for target + selected_tensors = cc[-indx:] + + elif self.current_strategy == "healpix": + selected_tensors = ( + cc if len(pp) > 0 and pp[0] else [] + ) # All tokens inside healpix cell have the same mask + + elif self.current_strategy == "random": + # For random masking, we simply select the tensors where the mask is True. + # When there's no mask it's assumed to be False. This is done via strict=False + selected_tensors = [c for c, p in zip(cc, pp, strict=False) if p] + else: + raise NotImplementedError( + f"Masking strategy {self.current_strategy} is not supported." + ) + + # Append the selected tensors to the processed_target_tokens list. + if selected_tensors: + processed_target_tokens.append(torch.cat(selected_tensors)) + else: + processed_target_tokens.append( + torch.empty(0, feature_dim, dtype=coords.dtype, device=coords.device) + ) + + return processed_target_tokens + + def _get_sampling_rate(self): + """ + Get the sampling, if requested by sampling it itself + """ + + # if masking_rate_sampling is enabled, sample the rate from a normal distribution. + if self.masking_rate_sampling: + rate = np.clip( + np.abs(self.rng.normal(loc=self.masking_rate, scale=1.0 / (2.5 * np.pi))), + 0.01, + 0.99, + ) + else: + rate = self.masking_rate + + return rate + + def _generate_healpix_mask(self, token_lens: list[int], rate: float) -> np.typing.NDArray: + """ + Generates a token-level mask based on hierarchical HEALPix cell selection. + + This method identifies parent cells at a lower resolution (hl_mask) and + masks all the child cells (and their corresponding tokens) at the data + resolution (hl_data). + + Args: + token_lens (list[int]): A list containing the number of tokens in each cell. + rate (float): The desired masking rate, applied to the parent cells. + + Returns: + np.ndarray: A flat boolean array (the token-level mask). + """ + + # hl_mask should be provided in masking_strategy_config + hl_data = self.healpix_level_data + hl_mask = self.masking_strategy_config.get("hl_mask") + + assert len(token_lens) == self.healpix_num_cells, ( + f"Expected {self.healpix_num_cells} cells at level {hl_data}, got {len(token_lens)}." + ) + + # Calculate the number of parent cells at the mask level (hl_mask) + num_parent_cells = 12 * (4**hl_mask) + level_diff = hl_data - hl_mask + num_children_per_parent = 4**level_diff + + rate = self._get_sampling_rate() + + # Choose parent cells to mask based on the specified rate. 
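+        # Worked example (hypothetical levels): with hl_data = 3 (768 cells),
+        # hl_mask = 1 (48 parent cells) and rate = 0.25, round(0.25 * 48) = 12
+        # parents are drawn and each masks 4**(3 - 1) = 16 children, i.e. 192 of
+        # the 768 data-level cells.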
+        num_parents_to_mask = int(np.round(rate * num_parent_cells))
+
+        if num_parents_to_mask == 0:
+            return np.zeros(sum(token_lens), dtype=bool)
+
+        # Select parent cells to mask
+        parent_ids_to_mask = self.rng.choice(num_parent_cells, num_parents_to_mask, replace=False)
+
+        # For each parent ID, calculate the child indices and set them in the mask
+        parent_ids = np.asarray(parent_ids_to_mask)
+        child_offsets = np.arange(num_children_per_parent)
+        child_indices = (parent_ids[:, None] * num_children_per_parent + child_offsets).reshape(-1)
+
+        # set mask list for children
+        cell_mask = np.zeros(self.healpix_num_cells, dtype=bool)
+        cell_mask[child_indices] = True
+
+        # Make the cell-level mask flat and apply it to the token lengths.
+        # np.repeat repeats each element of `cell_mask` a number of times specified by `token_lens`.
+        flat_mask = np.repeat(cell_mask, token_lens)
+
+        return flat_mask
+
+    def _generate_channel_mask(
+        self,
+        tokenized_data: list[torch.Tensor],
+        rate: float,
+        coords: torch.Tensor,
+        geoinfos: torch.Tensor,
+        source: torch.Tensor,
+    ) -> list[np.typing.NDArray]:
+        """
+        Generates a channel mask for each cell, handling completely empty tensors.
+        This method is robust against cells represented as 1D tensors of shape [0].
+
+        Args:
+            tokenized_data (list[torch.Tensor]): A list of tensors. Most will have a shape of
+                                                 (num_tokens, token_size, num_channels), but some
+                                                 may be empty with a shape of (0,), no data in cell
+            rate (float): The desired masking rate for channels.
+            coords (torch.Tensor): The coordinates tensor.
+            geoinfos (torch.Tensor): The geoinfos tensor.
+            source (torch.Tensor): The source data tensor.
+
+        Returns:
+            list[np.ndarray]: A list of boolean masks. Each mask corresponds to a tensor
+                              in tokenized_data.
+        """
+
+        if not tokenized_data:
+            return []
+
+        # masking rate sampling, to be refactored as shared between methods
+        rate = self._get_sampling_rate()
+
+        # isolate the number of actual data channels; dim_time_enc (= 6) refers to the
+        # time encoding
+        num_channels = self.dim_time_enc + coords.shape[-1] + geoinfos.shape[-1] + source.shape[-1]
+        assert num_channels > 0, "For channel masking, number of channels has to be nonzero."
+        num_fixed_channels = self.dim_time_enc + coords.shape[-1] + geoinfos.shape[-1]
+        num_data_channels = source.shape[-1]
+        mask_count = int(num_data_channels * rate)
+
+        # cat all tokens for efficient processing, split at the end again
+        # masks are generated simultaneously for all cells
+
+        tokenized_data_lens = [len(t) for t in tokenized_data]
+        tokenized_data_merged = torch.cat(tokenized_data)
+
+        num_tokens = tokenized_data_merged.shape[0]
+        token_size = tokenized_data_merged.shape[1]
+
+        if self.masking_strategy_config.get("mode") == "global":
+            # generate global mask
+            channel_mask = np.zeros(num_channels, dtype=bool)
+            m = num_fixed_channels + self.rng.choice(num_data_channels, mask_count, replace=False)
+            channel_mask[m] = True
+
+            full_mask = np.zeros_like(tokenized_data_merged).astype(bool)
+            full_mask[:, :] = channel_mask
+
+        else:  # different mask per cell
+            # generate all False mask but with swapped token_size and num_tokens dims so that
+            # the masking is constant per token
+            channel_mask = np.zeros((token_size, num_tokens, num_channels), dtype=bool)
+            # apply masking
+            nc = (num_tokens, num_data_channels)
+            channel_mask[:, :, num_fixed_channels:] = self.rng.uniform(0, 1, nc) < rate
+            # recover correct shape, i.e. 
swap token_size and num_tokens + full_mask = channel_mask.transpose([1, 0, 2]) + + # split across cells again + full_mask = np.split(full_mask, np.cumsum(tokenized_data_lens[:-1])) + + return full_mask + + def _generate_causal_mask( + self, + tokenized_data: list[torch.Tensor], + rate: float, + coords: torch.Tensor, + geoinfos: torch.Tensor, + source: torch.Tensor, + ) -> list[np.typing.NDArray]: + """ + Generates a causal mask, masking the latest times + in each tokenized_data according to the masking rate. + """ + if not tokenized_data: + return [] + + rate = self._get_sampling_rate() + + # Extract all lengths at once + token_lens = np.array([len(token_data) for token_data in tokenized_data]) + + if len(token_lens) == 0: + return [] + + # Calculate start indices for masking + # astype(int) performs floor operation by truncation + num_future_to_mask = (rate * token_lens).astype(int) + start_mask_indices = np.maximum(1, token_lens - num_future_to_mask) + + # Handle edge cases + mask_valid = token_lens > 1 # Only cells with >1 timestep can be masked + start_mask_indices = np.where(mask_valid, start_mask_indices, token_lens) + + # Create masks with list comprehension + # Needed to handle variable lengths + full_mask = [ + ( + np.concatenate( + [ + np.zeros(start_idx, dtype=bool), + np.ones(max(0, token_len - start_idx), dtype=bool), + ] + ) + if token_len > 1 + else (np.zeros(1, dtype=bool) if token_len == 1 else np.array([], dtype=bool)) + ) + for token_len, start_idx in zip(token_lens, start_mask_indices, strict=False) + ] + + return full_mask diff --git a/src/weathergen/datasets/multi_stream_data_sampler.py b/src/weathergen/datasets/multi_stream_data_sampler.py index 1779609a4..e38d518da 100644 --- a/src/weathergen/datasets/multi_stream_data_sampler.py +++ b/src/weathergen/datasets/multi_stream_data_sampler.py @@ -1,4 +1,4 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -7,602 +7,492 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. +import logging +import pathlib + import numpy as np import torch -import math -import datetime -from copy import deepcopy -import logging -import time -import code -import os -import yaml - -import pandas as pd - -from weathergen.datasets.obs_dataset import ObsDataset -from weathergen.datasets.anemoi_dataset import AnemoiDataset -from weathergen.datasets.normalizer import DataNormalizer -from weathergen.datasets.batchifyer import Batchifyer -from weathergen.datasets.utils import merge_cells - -from weathergen.utils.logger import logger - - -class MultiStreamDataSampler( torch.utils.data.IterableDataset): - - ################################################### - def __init__(self, data_path, rank, num_ranks, streams, - start_date, end_date, len_hrs, step_hrs, - batch_size, masking_mode, masking_rate, masking_rate_sampling, - shuffle = True, rng_seed = None, healpix_level = 2, - forecast_delta_hrs = 0, forecast_steps = 1, forecast_policy = None, - samples_per_epoch = None, input_window_steps = 1, - embed_local_coords = False, embed_centroids_local_coords = False, - target_coords_local = False, sampling_rate_target = 1. ): - - super( MultiStreamDataSampler, self).__init__() - - assert end_date > start_date - - self.mask_value = 0. 
- # obs_id, year, day of year, minute of day - self.geoinfo_offset = 6 - - self.len_hrs = len_hrs - self.step_hrs = step_hrs - - fc_policy_seq = 'sequential'==forecast_policy or 'sequential_random'==forecast_policy - assert forecast_steps >= 0 if not fc_policy_seq else True - self.forecast_delta_hrs = forecast_delta_hrs if forecast_delta_hrs > 0 else self.len_hrs - self.forecast_steps =np.array([forecast_steps] if type(forecast_steps)==int else forecast_steps) - self.forecast_policy = forecast_policy - - # end date needs to be adjusted to account for window length - format_str = '%Y%m%d%H%M%S' - end_dt = datetime.datetime.strptime(str(end_date), format_str) - end_dt = end_dt + datetime.timedelta(hours=len_hrs) - end_date_padded = end_dt.strftime( format_str) - - self.len = 100000000 - - self.obs_datasets_norm, self.obs_datasets_idxs = [], [] - for i, stream_info in enumerate(streams) : - - self.obs_datasets_norm.append( []) - self.obs_datasets_idxs.append( []) - - for fname in stream_info['filenames'] : - - ds = None - if stream_info['type']=='obs' : - - ds = ObsDataset( data_path + '/' + fname, start_date, end_date_padded, len_hrs, step_hrs, False) - - # skip pre-pended columns before lat,lon - do = 0 - while ds.colnames[do] != 'lat' : - do += 1 - - # the processing here is not natural but a workaround to various inconsistencies in the - # current datasets - data_idxs = [i for i,cn in enumerate(ds.selected_colnames[do:]) if 'obsvalue_' == cn[:9]] - mask = np.ones( len(ds.selected_colnames[do:]), dtype=np.int32).astype(bool) - mask[data_idxs] = False - mask[-1] = False if 'healpix' in ds.selected_colnames[-1] else mask[-1] - geoinfo_idx = (np.arange( len(ds.selected_colnames[do:]), dtype=np.int64)[mask]).tolist() - logger.info( '{} :: {} : {}'.format( stream_info['name'], - [ds.selected_colnames[do:][i] for i in geoinfo_idx], - [ds.selected_colnames[do:][i] for i in data_idxs])) - stats_offset = 0 - - elif stream_info['type']=='anemoi' : - - ds = AnemoiDataset( data_path + '/' + fname, start_date, end_date, len_hrs, step_hrs, False) - do = 0 - geoinfo_idx = [ 0, 1] - stats_offset = 2 - # TODO: avoid hard coding - data_idxs = list(np.arange( 2, 82+2)) - - else : - assert False, 'Unsupported stream type {}.'.format( stream_info['type']) - - fsm = self.forecast_steps[0] - self.len = min( self.len, len(ds) - (self.len_hrs * (fsm+1)) // self.step_hrs ) - - normalizer = DataNormalizer( stream_info, self.geoinfo_offset, stats_offset, - ds, geoinfo_idx, data_idxs, do) - - self.obs_datasets_norm[-1] += [ (ds, normalizer, do) ] - self.obs_datasets_idxs[-1] += [ (geoinfo_idx, data_idxs) ] - - # by construction, this is identical for all datasets - self.len_native = np.array([len(ds[0]) for dss in self.obs_datasets_norm for ds in dss]).min() - - self.len = min( self.len, self.len if not samples_per_epoch else samples_per_epoch) - # adjust len to split loading across all workers - len_chunk = ((self.len_native // num_ranks) // batch_size) * batch_size - self.len = min( self.len, len_chunk) - # ensure it is multiple of batch_size - self.len = (self.len // batch_size) * batch_size - - self.rank = rank - self.num_ranks = num_ranks - - self.streams = streams - self.shuffle = shuffle - self.input_window_steps = input_window_steps - self.embed_local_coords = embed_local_coords - self.embed_centroids_local_coords = embed_centroids_local_coords - self.target_coords_local = target_coords_local - self.sampling_rate_target = sampling_rate_target - - self.masking_mode = masking_mode - self.masking_rate = 
masking_rate - self.masking_rate_sampling = masking_rate_sampling - - self.batch_size = batch_size - self.rng = np.random.default_rng( rng_seed) - - self.healpix_level_source = healpix_level - self.healpix_level_target = healpix_level - self.num_healpix_cells_source = 12 * 4**self.healpix_level_source - self.num_healpix_cells_target = 12 * 4**self.healpix_level_target - - self.batchifyer = Batchifyer( healpix_level) - - self.epoch = 0 - - ################################################### - def advance( self) : - ''' - Advance epoch - ''' - self.epoch += 1 - # advance since only copies are used for actual loading with parallel loaders - self.rng.random() - - ################################################### - def get_num_chs( self) : - gs = self.geoinfo_offset - return [[len(idxs[0])+gs +len(idxs[1]) for idxs in idxs_s] for idxs_s in self.obs_datasets_idxs] - - ################################################### - def reset( self): - - fsm = self.forecast_steps[ min( self.epoch, len(self.forecast_steps)-1)] - if fsm > 0 : - logger.info( f'forecast_steps at epoch={self.epoch} : {fsm}') - - # data - if self.shuffle : - # native length of datasets, independent of epoch length that has potentially been specified - self.perms = self.rng.permutation( self.len_native-(( self.len_hrs * (fsm+1))//self.step_hrs)) - # self.perms = self.perms[:len(self)] - else : - self.perms = np.arange( self.len_native) - # logging.getLogger('obslearn').info( f'perms : {self.perms[:10]}') - - # forecast time steps - len_dt_samples = len(self) // self.batch_size - if self.forecast_policy is None : - self.perms_forecast_dt = np.zeros( len_dt_samples, dtype=np.int64) - elif self.forecast_policy == 'fixed' or self.forecast_policy == 'sequential' : - self.perms_forecast_dt = fsm * np.ones( len_dt_samples, dtype=np.int64) - elif self.forecast_policy == 'random' or self.forecast_policy == 'sequential_random' : - # randint high=one-past - self.perms_forecast_dt = np.random.randint( low=self.forecast_steps.min(), high=fsm+1, - size=len_dt_samples, dtype=np.int64) - else : - assert False - - ################################################### - def denormalize_data( self, obs_id, data, with_offset=True) : - return self.obs_datasets_norm[obs_id][0][1].denormalize_data( data, with_offset) - - ################################################### - def denormalize_coords( self, obs_id, coords) : - return self.obs_datasets_norm[obs_id][0][1].denormalize_coords( coords) - - ################################################### - def get_geoinfo_size( self, obs_id, i_source ) : - return len(self.obs_datasets_idxs[obs_id][i_source][0]) + self.geoinfo_offset - - ################################################### - def get_geoinfo_sizes( self) : - return [self.get_geoinfo_size(i,0) for i,_ in enumerate(self.obs_datasets_idxs)] - - ################################################### - def create_grid( self, grid, time_win2): - - # load specified grid - source2 = np.float32(np.load( f'./assets/{grid}')) - - # generate times - start, end = time_win2 - delta = np.timedelta64(1, 'h') - dates = np.arange(start.astype('datetime64[h]')+delta, end.astype( 'datetime64[h]')+delta,delta) - - # convert to string - dates = [str(d.astype('datetime64[ns]')) for d in dates] - # TODO: avoid hard coding 40320 - times2 = np.repeat( dates, 40320) - - return (source2, times2) - - ################################################### - def read_anemoi( self, grid_info, times2, source2): - - from anemoi.datasets import open_dataset - from earthkit.meteo import 
thermo - - with open( f'./assets/{grid_info}') as file: - grid_info = yaml.safe_load( file) - - start = times2[0][:10] + ' ' + times2[0][11:19] - end = times2[-1][:10] + ' ' + times2[-1][11:19] - - # open anemoi - # TODO: avoid hard coding path - path = '/gpfs/scratch/ehpc01/dop/era5/aifs-ea-an-oper-0001-mars-o96-1979-2022-1h-v4.zarr' - ds_anemoi = open_dataset( path, start = start, end = end, select = grid_info['colnames'], - reorder = grid_info['colnames']) - - # reshape to fit grid - ds_anemoi = ( np.array(ds_anemoi).transpose(0, 3, 2, 1)).reshape( -1,len(grid_info['colnames'])) - - # perform transformation if specified - if 'transformation' in grid_info.keys(): - for transformation in grid_info['transformation'] : - exec(transformation) - - # inject era data into grid - source2[:,grid_info['indice_start']:] = ds_anemoi - - return source2 - - ################################################### - def __iter__(self): - - iter_start, iter_end = self.worker_workset() - - # create new shuffeling - self.reset() - - nhc_target = self.num_healpix_cells_target - nhc_source = self.num_healpix_cells_source - - # bidx is used to count the #batches that have been emitted - # idx_raw is used to index into the dataset; the decoupling is needed - # since there are empty batches - idx_raw = iter_start - for i,bidx in enumerate( range( iter_start, iter_end, self.batch_size)) : - - # targets, targets_coords, targets_idxs = [], [], [], - tcs, tcs_lens, target_tokens, source_tokens_cells, source_tokens_lens = [],[],[], [], [] - target_tokens_lens, sources, source_centroids = [], [], [] - - # forecast_dt needs to be constant per batch (amortized through data parallel training) - forecast_dt = self.perms_forecast_dt[i] - - # use while loop due to the scattered nature of the data in time and to - # ensure batches are not empty - ib = 0 - while len(source_tokens_cells) < self.batch_size : - - idx = self.perms[idx_raw % self.perms.shape[0]] - idx_raw += 1 - - step_dt = self.len_hrs // self.step_hrs - step_forecast_dt = step_dt + (self.forecast_delta_hrs * forecast_dt) // self.step_hrs - - time_win1, time_win2 = ( self.obs_datasets_norm[0][0][0].time_window( idx), - self.obs_datasets_norm[0][0][0].time_window( idx + step_forecast_dt)) - - c_tcs = [[] for _ in range(forecast_dt+1)] - c_tcs_lens = [[] for _ in range(forecast_dt+1)] - c_target_tokens = [[] for _ in range(forecast_dt+1)] - c_target_tokens_lens = [[] for _ in range(forecast_dt+1)] - c_source_tokens_cells = []; c_source_tokens_lens = []; c_source_centroids = [] - c_source_raw = [] - - for obs_id, (stream_info, stream_dsn, stream_idxs) in enumerate( zip( self.streams, - self.obs_datasets_norm, - self.obs_datasets_idxs)) : - - - s_tcs = []; s_tcs_lens = []; s_target_tokens = []; s_target_tokens_lens = [] - s_source_tokens_cells = []; s_source_tokens_lens = []; s_source_centroids = [] - s_source_raw = [] - - token_size = stream_info['token_size'] - grid = stream_info['gridded_output'] if 'gridded_output' in stream_info else None - grid_info = stream_info['gridded_output_info'] if 'gridded_output_info' in stream_info else None - - for i_source, ((ds, normalizer, do), s_idxs) in enumerate( zip(stream_dsn, stream_idxs)) : - - # source window (of potentially multi-step length) - (source1,times1) = ds[idx] - for it in range(1,self.input_window_steps) : - (source0,times0) = ds[idx - it*step_dt] - source1 = np.concatenate( [source0, source1], 0) - times1 = np.concatenate( [times0, times1], 0) - - if source1.shape[0] < token_size : - # skip if there are not 
enough data points - tt_cells, tt_lens = torch.tensor([]), torch.zeros([nhc_target],dtype=torch.int32) - ss_cells, ss_lens = torch.tensor([]), torch.zeros([nhc_source],dtype=torch.int32) - ss_centroids = torch.tensor([]) - tc, tc_lens = torch.tensor([]), torch.zeros([nhc_target],dtype=torch.int32) - source1_raw = torch.tensor([]) - else : - - oi = ds.properties['obs_id'] - source1 = self.prepare_window_source( oi, do, normalizer, source1, times1, time_win1, s_idxs) - - # this should only be collected in validation mode - source1_raw = normalizer.denormalize_data( source1.clone()) - - (ss_cells, ss_lens, ss_centroids) = self.batchifyer.batchify_source( - stream_info, - self.geoinfo_offset, - self.get_geoinfo_size( obs_id,i_source), - self.masking_rate, - self.masking_rate_sampling, self.rng, - source1, times1, - normalizer.normalize_coords) - - s_source_raw += [source1_raw] - s_source_tokens_lens += [ss_lens] - s_source_tokens_cells += [ss_cells] - s_source_centroids += [ss_centroids] if len(ss_centroids)>0 else [torch.tensor([])] - - # collect all sources in current stream and add to batch sample list when non-empty - if torch.tensor([len(s) for s in s_source_tokens_cells]).sum() > 0 : - - c_source_raw +=[ torch.cat( s_source_raw)] - - # collect by merging entries per cells, preserving cell structure - c_source_tokens_cells += [ merge_cells( s_source_tokens_cells, nhc_source) ] - c_source_centroids += [ merge_cells( s_source_centroids, nhc_source)] - # lens can be stacked and summed - c_source_tokens_lens += [torch.stack( s_source_tokens_lens).sum(0)] - # remove NaNs - c_source_tokens_cells[-1][ torch.isnan( c_source_tokens_cells[-1]) ] = self.mask_value - c_source_centroids[-1][ torch.isnan( c_source_centroids[-1]) ] = self.mask_value - else : - c_source_raw += [torch.tensor([])] - c_source_tokens_lens += [torch.zeros([nhc_source])] - c_source_tokens_cells += [torch.tensor([])] - c_source_centroids += [torch.tensor([])] - - # target - - # collect for all forecast steps - for fstep in range( forecast_dt+1) : - # collect all streams - for i_source, ((ds, normalizer, do), s_idxs) in enumerate( zip(stream_dsn, stream_idxs)) : - - (source2,times2) = ds[idx + step_forecast_dt] - - if grid is not None: - - (source2,times2) = self.create_grid( grid, time_win2) - - # generate ERA5 data if specified - if grid_info is not None: - source2 = self.read_anemoi( grid_info, times2, source2) - - if source2.shape[0] < token_size : - # skip if there are not enough data points - tt_cells, tt_lens = torch.tensor([]), torch.zeros([nhc_target],dtype=torch.int32) - ss_cells, ss_lens = torch.tensor([]), torch.zeros([nhc_source],dtype=torch.int32) - ss_centroids = torch.tensor([]) - tc, tc_lens = torch.tensor([]), torch.zeros([nhc_target],dtype=torch.int32) - source1_raw = torch.tensor([]) - else : - - oi = ds.properties['obs_id'] - source2 = self.prepare_window_target( oi, do, normalizer, source2, times2, time_win2, s_idxs) - - (tt_cells, tt_lens, tc, tc_lens) = self.batchifyer.batchify_target( - stream_info, - self.geoinfo_offset, - self.get_geoinfo_size( obs_id,i_source), - self.sampling_rate_target, - self.rng, - source2, times2, - normalizer.normalize_targets) - - s_target_tokens_lens += [tt_lens] if len(tt_lens)>0 else [torch.tensor([])] - s_target_tokens += [tt_cells] if len(tt_cells)>0 else [torch.tensor([])] - s_tcs += [tc]; s_tcs_lens += [tc_lens] - - # collect all sources in current stream and add to batch sample list when non-empty - if torch.tensor([len(s) for s in s_target_tokens]).sum() > 0 : - - 
c_tcs[fstep] += [ merge_cells( s_tcs, nhc_target) ] - c_target_tokens[fstep] += [ merge_cells( s_target_tokens, nhc_target)] - # lens can be stacked and summed - c_target_tokens_lens[fstep] += [torch.stack( s_target_tokens_lens).sum(0)] - c_tcs_lens[fstep] += [torch.stack( s_tcs_lens).sum(0)] - # remove NaNs - c_tcs[fstep][-1][ torch.isnan( c_tcs[fstep][-1]) ] = self.mask_value - - else : - c_tcs[fstep] += [torch.tensor([])]; c_tcs_lens[fstep] += [torch.tensor([])] - c_target_tokens[fstep] += [torch.tensor([])]; c_target_tokens_lens[fstep] += [torch.tensor([])] - - # add batch, ensure sample is not empty - s1 = torch.tensor( [stl.sum() for stl in c_source_tokens_lens]).sum() - s2 = torch.tensor( [len(t) for f_tcs in c_tcs for t in f_tcs]).sum() - if s1 > 0 and s2 > 0 : - # source - sources += [ c_source_raw ] - source_tokens_cells +=[ c_source_tokens_cells ] - source_tokens_lens +=[ c_source_tokens_lens ] - source_centroids += [ c_source_centroids ] - # target - tcs += [ c_tcs ] - tcs_lens += [ c_tcs_lens ] - target_tokens += [ c_target_tokens ] - target_tokens_lens += [ c_target_tokens_lens ] - ib += 1 - - # skip if source is completely empty or nothing to predict (which causes errors in back prop) - target_tokens_lens_total = torch.cat([torch.cat( t) for tt in target_tokens_lens for t in tt]) - if len(source_tokens_lens)==0 or target_tokens_lens_total.sum()==0 : - continue - - # precompute for processing in the model (with varlen flash attention) - source_cell_lens = torch.stack( [torch.stack( stl_b) if len(stl_b)>0 else torch.tensor([]) - for stl_b in source_tokens_lens]) - source_cell_lens = torch.sum( source_cell_lens, 1).flatten().to(torch.int32) - source_cell_lens = torch.cat( [torch.zeros(1, dtype=torch.int32), source_cell_lens]) - - source_tokens_lens = torch.from_numpy( np.array( source_tokens_lens)).to(torch.int32) - - # precompute index sets for scatter operation after embed - offsets_base = source_tokens_lens.sum(1).sum(0).cumsum(0) - offsets = torch.cat( [torch.zeros(1,dtype=torch.int32), offsets_base[:-1] ]) - offsets_pe = torch.zeros_like( offsets) - idxs_embed = []; idxs_embed_pe = [] - for ib, sb in enumerate(source_tokens_cells) : - idxs_embed += [ [] ]; idxs_embed_pe += [ [] ] - for itype, s in enumerate( sb) : - - if s.shape[0]==0 : - idxs_embed[-1] += [ torch.tensor([]) ]; idxs_embed_pe[-1] += [ torch.tensor([]) ] - continue - - idxs = torch.cat( [torch.arange( o, o+l, dtype=torch.int64) - for o,l in zip(offsets, source_tokens_lens[ib,itype])]) - idxs_embed[-1] += [ idxs.unsqueeze(1) ] - idxs_embed_pe[-1] += [torch.cat( [torch.arange( o, o+l, dtype=torch.int32) - for o,l in zip(offsets_pe, source_tokens_lens[ib][itype])])] - # advance offsets - offsets += source_tokens_lens[ib][itype] - offsets_pe += source_tokens_lens[ib][itype] - - target_coords_lens = tcs_lens - - # target coords idxs - tcs_lens_merged = []; tcs_idxs = [] - pad = torch.zeros( 1, dtype=torch.int32) - for ii in range( len(self.streams)) : - - # generate len lists for varlen attention (per batch list for local, per-cell attention and - # global, per-batch-item lists otherwise) - if self.target_coords_local : - tcs_lens_merged += [ [ torch.cat( [pad, torch.cat( [target_coords_lens[i_b][fstep][ii] - for i_b in range(len(tcs))]) ]).to(torch.int32) - for fstep in range( forecast_dt+1)] ] - else : - tcs_lens_merged += [ torch.cat( [pad, torch.tensor( [len(tcs[i_b][ii]) - for i_b in range(len(tcs))])]).to(torch.int32) ] - - # lengths for varlen attention - tcs_idxs += [[torch.cat([torch.arange(l) for l in 
tlm]) for tlm in tcs_lens_merged[-1]]] - - # reorder to have forecast step as first dimension, then batch items - def list_transpose( clist) : - return [[l[i] for l in clist] for i in range(len(clist[0]))] - target_tokens = list_transpose( target_tokens) - target_tokens_lens = list_transpose( target_tokens_lens) - tcs = list_transpose( tcs) - target_coords_lens = list_transpose( target_coords_lens) - tcs_lens_merged = list_transpose( tcs_lens_merged) - tcs_idxs = list_transpose( tcs_idxs) - - assert len(source_tokens_cells) == self.batch_size - yield ( sources, source_tokens_cells, source_tokens_lens, source_centroids, source_cell_lens, - [idxs_embed, idxs_embed_pe], - target_tokens, target_tokens_lens, tcs, target_coords_lens, [tcs_lens_merged,tcs_idxs], - forecast_dt) - - ################################################### - def prepare_window_source( self, obs_id, data_offset, normalizer, source, times, time_win, stream_idxs) : - - source = source[:,data_offset:] - # permutation to have all geoinfos at the beginning (only apply this if necessary) - idxs = np.array(stream_idxs[0] + stream_idxs[1]) - # if not (idxs == np.arange( idxs.max()+1)).all() : - source = source[:,idxs] - - # assemble tensor as fed to the network, combining geoinfo and data - fp32 = torch.float32 - dt = pd.to_datetime( times) - dt_win = pd.to_datetime( time_win) - dt_delta = dt - dt_win[0] - source = torch.cat( ( torch.full([dt.shape[0],1], obs_id, dtype=fp32), - torch.tensor( dt.year, dtype=fp32).unsqueeze(1), - torch.tensor( dt.dayofyear, dtype=fp32).unsqueeze(1), - torch.tensor( dt.hour*60+dt.minute, dtype=fp32).unsqueeze(1), - torch.tensor( dt_delta.seconds, dtype=fp32).unsqueeze(1), - torch.tensor( dt_delta.seconds, dtype=fp32).unsqueeze(1), - torch.from_numpy(source) - ), 1) - # normalize data (leave coords so that they can be utilized for task/masking) - source = normalizer.normalize_data( source) - - return source - - ################################################### - def prepare_window_target( self, obs_id, data_offset, normalizer, source, times, time_win, stream_idxs) : - - source = source[:,data_offset:] - # permutation to have all geoinfos at the beginning (only apply this if necessary) - idxs = np.array(stream_idxs[0] + stream_idxs[1]) - # if not (idxs == np.arange( idxs.max()+1)).all() : - source = source[:,idxs] - - # assemble tensor as fed to the network, combining geoinfo and data - dt = pd.to_datetime( times) - dt_win = pd.to_datetime( time_win) - # for target only provide local time - dt_delta = torch.tensor( (dt - dt_win[0]).seconds, dtype=torch.float32).unsqueeze(1) - source = torch.cat( ( torch.full([dt.shape[0],1], obs_id, dtype=torch.float32), - dt_delta, - dt_delta, - dt_delta, - dt_delta, - dt_delta, - torch.from_numpy(source) - ), 1) - # normalize data (leave coords so that they can be utilized for task/masking) - source = normalizer.normalize_data( source) - - return source - - ################################################### - def __len__(self): - return self.len - - ################################################### - def worker_workset( self) : - - # local_start, local_end = 0, len(self) - local_start, local_end = self.rank * self.len, (self.rank+1) * self.len - - worker_info = torch.utils.data.get_worker_info() - - if worker_info is None: - - assert self.num_ranks == 1 - iter_start = 0 - iter_end = len(self) - - else: - - # split workload - per_worker = (local_end - local_start) // worker_info.num_workers - iter_start = local_start + worker_info.id * per_worker - iter_end = 
iter_start + per_worker - if worker_info.id+1 == worker_info.num_workers : - iter_end = local_end - logging.getLogger('obslearn').info( f'{self.rank}::{worker_info.id}' - + f' : dataset [{local_start},{local_end}) : [{iter_start},{iter_end})') - - return iter_start, iter_end +from weathergen.common.io import IOReaderData +from weathergen.datasets.data_reader_anemoi import DataReaderAnemoi +from weathergen.datasets.data_reader_base import ( + DataReaderBase, + TimeWindowHandler, + TIndex, + str_to_datetime64, +) +from weathergen.datasets.data_reader_fesom import DataReaderFesom +from weathergen.datasets.data_reader_obs import DataReaderObs +from weathergen.datasets.masking import Masker +from weathergen.datasets.stream_data import StreamData, spoof +from weathergen.datasets.tokenizer_forecast import TokenizerForecast +from weathergen.datasets.tokenizer_masking import TokenizerMasking +from weathergen.datasets.utils import ( + compute_idxs_predict, + compute_offsets_scatter_embed, + compute_source_cell_lens, +) +from weathergen.readers_extra.registry import get_extra_reader +from weathergen.utils.distributed import is_root +from weathergen.utils.train_logger import Stage + +type AnyDataReader = DataReaderBase | DataReaderAnemoi | DataReaderObs + +logger = logging.getLogger(__name__) + + +def readerdata_to_torch(rdata: IOReaderData) -> IOReaderData: + """ + Convert data, coords, and geoinfos to torch tensor + """ + rdata.coords = torch.tensor(rdata.coords) + rdata.geoinfos = torch.tensor(rdata.geoinfos) + rdata.data = torch.tensor(rdata.data) + + return rdata + + +def collect_datasources(stream_datasets: list, idx: int, type: str) -> IOReaderData: + """ + Utility function to collect all sources / targets from streams list + """ + + rdatas = [] + + for ds in stream_datasets: + if type == "source": + get_reader_data = ds.get_source + normalize_channels = ds.normalize_source_channels + elif type == "target": + get_reader_data = ds.get_target + normalize_channels = ds.normalize_target_channels + else: + assert False, "invalid value for argument `type`" + + # get source (of potentially multi-step length) + rdata = get_reader_data(idx).remove_nan_coords() + rdata.data = normalize_channels(rdata.data) + rdata.geoinfos = ds.normalize_geoinfos(rdata.geoinfos) + rdatas += [rdata] + + return IOReaderData.combine(rdatas) + + +class MultiStreamDataSampler(torch.utils.data.IterableDataset): + ################################################### + def __init__( + self, + cf, + start_date_, + end_date_, + batch_size, + samples_per_mini_epoch, + stage: Stage, + shuffle=True, + ): + super(MultiStreamDataSampler, self).__init__() + + start_date = str_to_datetime64(start_date_) + end_date = str_to_datetime64(end_date_) + + assert end_date > start_date, (end_date, start_date) + + self.mask_value = 0.0 + self._stage = stage + + self.len_hrs: int = cf.len_hrs + self.step_hrs: int = cf.step_hrs + self.time_window_handler = TimeWindowHandler(start_date, end_date, cf.len_hrs, cf.step_hrs) + if is_root(): + logger.info( + f"Time window handler: start={start_date}, end={end_date}," + f"len_hrs={cf.len_hrs}, step_hrs={cf.step_hrs}" + ) + + self.forecast_offset = cf.forecast_offset + self.forecast_delta_hrs = ( + cf.forecast_delta_hrs if cf.forecast_delta_hrs > 0 else self.len_hrs + ) + assert self.forecast_delta_hrs == self.len_hrs, "Only supported option at the moment" + self.forecast_steps = np.array( + [cf.forecast_steps] if isinstance(cf.forecast_steps, int) else cf.forecast_steps + ) + if cf.forecast_policy is not 
None: + if self.forecast_steps.max() == 0 and is_root(): + logger.warning("forecast policy is not None but number of forecast steps is 0.") + self.forecast_policy = cf.forecast_policy + + self.len = 100000000 + + self.streams_datasets: list[list[AnyDataReader]] = [] + for _, stream_info in enumerate(cf.streams): + self.streams_datasets.append([]) + + for fname in stream_info["filenames"]: + kwargs = { + "tw_handler": self.time_window_handler, + "stream_info": stream_info, + } + dataset: type[AnyDataReader] | None = None + match stream_info["type"]: + case "obs": + dataset = DataReaderObs + datapath = cf.data_path_obs + # kwargs["end"] = end_date_padded # TODO: implement the padding + case "anemoi": + dataset = DataReaderAnemoi + datapath = cf.data_path_anemoi + case "fesom": + dataset = DataReaderFesom + datapath = cf.data_path_fesom + case type_name: + reader_entry = get_extra_reader(type_name, cf) + if reader_entry is not None: + dataset = reader_entry.constructor + datapath = reader_entry.data_path + else: + msg = f"Unsupported stream type {stream_info['type']}" + f"for stream name '{stream_info['name']}'." + raise ValueError(msg) + + datapath = pathlib.Path(datapath) + fname = pathlib.Path(fname) + # dont check if file exists since zarr stores might be directories + if fname.exists(): + # check if fname is a valid path to allow for simple overwriting + filename = fname + else: + filename = pathlib.Path(datapath) / fname + + if not filename.exists(): # see above + msg = ( + f"Did not find input data for {stream_info['type']} " + f"stream '{stream_info['name']}': {filename}." + ) + raise FileNotFoundError(msg) + + ds_type = stream_info["type"] + if is_root(): + logger.info( + f"Opening dataset with type: {ds_type}" + + f" from stream config {stream_info['name']}.", + ) + ds = dataset(filename=filename, **kwargs) + + fsm = self.forecast_steps[0] + if len(ds) > 0: + self.len = min(self.len, len(ds) - (self.len_hrs * (fsm + 1)) // self.step_hrs) + + # MODIFIES config !!! 
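+            # The channel lists discovered by the data reader are written back into
+            # this stream's config entry under stage-specific keys; the per-channel
+            # loss weights default to 1.0 when the reader does not provide any.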
+ stream_info[str(self._stage) + "_source_channels"] = ds.source_channels + stream_info[str(self._stage) + "_target_channels"] = ds.target_channels + stream_info["target_channel_weights"] = ( + ds.target_channel_weights + if ds.target_channel_weights is not None + else [1.0 for _ in ds.target_channels] + ) + + self.streams_datasets[-1] += [ds] + + index_range = self.time_window_handler.get_index_range() + self.len = int(index_range.end - index_range.start) + self.len = min(self.len, samples_per_mini_epoch if samples_per_mini_epoch else self.len) + # adjust len to split loading across all workers and ensure it is multiple of batch_size + len_chunk = ((self.len // cf.world_size) // batch_size) * batch_size + self.len = min(self.len, len_chunk) + logger.info(f"index_range={index_range}, len={self.len}, len_chunk={len_chunk}") + + self.rank = cf.rank + self.world_size = cf.world_size + + self.streams = cf.streams + self.shuffle = shuffle + # TODO: remove options that are no longer supported + self.input_window_steps = cf.input_window_steps + self.embed_local_coords = cf.embed_local_coords + self.embed_centroids_local_coords = cf.embed_centroids_local_coords + self.sampling_rate_target = cf.sampling_rate_target + + self.batch_size = batch_size + + # ensure data_loader_rng_seed is not smaller than loader_num_workers to avoid + # issues in per loader rng seed computation + self.data_loader_rng_seed = ( + cf.data_loader_rng_seed + if cf.data_loader_rng_seed > cf.loader_num_workers + else cf.data_loader_rng_seed * 13 + ) + + self.healpix_level: int = cf.healpix_level + self.num_healpix_cells: int = 12 * 4**self.healpix_level + + if cf.training_mode == "forecast": + self.tokenizer = TokenizerForecast(cf.healpix_level) + elif cf.training_mode == "masking": + masker = Masker(cf) + self.tokenizer = TokenizerMasking(cf.healpix_level, masker) + assert self.forecast_offset == 0, "masked token modeling requires auto-encoder training" + msg = "masked token modeling does not support self.input_window_steps > 1; " + msg += "increase window length" + assert self.input_window_steps == 1, msg + else: + assert False, f"Unsupported training mode: {cf.training_mode}" + + self.mini_epoch = 0 + + ################################################### + def advance(self): + """ + Advance mini_epoch (this is applied to the template for the worker processes) + """ + self.mini_epoch += 1 + + ################################################### + def get_sources_size(self): + return [ + 0 + if ds[0].get_source_num_channels() == 0 + else ds[0].get_source_num_channels() + + ds[0].get_geoinfo_size() + + ds[0].get_coords_size() + + self.tokenizer.get_size_time_embedding() + for ds in self.streams_datasets + ] + + ################################################### + def get_sources_num_channels(self): + return [ds[0].get_source_num_channels() for ds in self.streams_datasets] + + ################################################### + def get_targets_num_channels(self): + return [ds[0].get_target_num_channels() for ds in self.streams_datasets] + + ################################################### + def get_targets_coords_size(self): + # TODO: avoid hard coding magic values + # +6 at the end for stram_id and time encoding + return [ + (ds[0].get_geoinfo_size() + (5 * (3 * 5)) + 3 * 8) + 6 for ds in self.streams_datasets + ] + + ################################################### + def reset(self): + # initialize the random number generator: self.data_loader_rng_seed is set to a DDP-unique + # value in worker_workset() + self.rng = 
np.random.default_rng(self.data_loader_rng_seed) + + fsm = ( + self.forecast_steps[min(self.mini_epoch, len(self.forecast_steps) - 1)] + if self.forecast_policy != "random" + else self.forecast_steps.max() + ) + if fsm > 0: + logger.info(f"forecast_steps at mini_epoch={self.mini_epoch} : {fsm}") + + # data + index_range = self.time_window_handler.get_index_range() + idx_end = index_range.end + # native length of datasets, independent of mini_epoch length that has potentially been + # specified + forecast_len = (self.len_hrs * (fsm + 1)) // self.step_hrs + idx_end -= forecast_len + self.forecast_offset + assert idx_end > 0, "dataset size too small for forecast range" + self.perms = np.arange(index_range.start, idx_end) + if self.shuffle: + self.perms = self.rng.permutation(self.perms) + + # forecast time steps + len_dt_samples = len(self) // self.batch_size + if self.forecast_policy is None: + self.perms_forecast_dt = np.zeros(len_dt_samples, dtype=np.int64) + elif self.forecast_policy == "fixed" or self.forecast_policy == "sequential": + self.perms_forecast_dt = fsm * np.ones(len_dt_samples, dtype=np.int64) + elif self.forecast_policy == "random" or self.forecast_policy == "sequential_random": + # randint high=one-past + self.perms_forecast_dt = self.rng.integers( + low=self.forecast_steps.min(), high=fsm + 1, size=len_dt_samples, dtype=np.int64 + ) + else: + assert False + + self.tokenizer.reset_rng(self.rng) + + ################################################### + def denormalize_source_channels(self, stream_id, data) -> torch.Tensor: + # TODO: with multiple ds per stream we need to distinguish these here + return self.streams_datasets[stream_id][0].denormalize_source_channels(data) + + ################################################### + def denormalize_target_channels(self, stream_id, data) -> torch.Tensor: + # TODO: with multiple ds per stream we need to distinguish these here + return self.streams_datasets[stream_id][0].denormalize_target_channels(data) + + ################################################### + def __iter__(self): + """ + Return one batch of data + + Return : list[list[StreamData]] + len : number of batch items + len[*] : number of streams + """ + iter_start, iter_end = self.worker_workset() + logger.info(f"iter_start={iter_start}, iter_end={iter_end}, len={self.len}") + + # create new shuffeling + self.reset() + + # bidx is used to count the #batches that have been emitted + # idx_raw is used to index into the dataset; the decoupling is needed + # since there are empty batches + idx_raw = iter_start + for i, _bidx in enumerate(range(iter_start, iter_end, self.batch_size)): + # forecast_dt needs to be constant per batch (amortized through data parallel training) + forecast_dt = self.perms_forecast_dt[i] + + # use while loop due to the scattered nature of the data in time and to + # ensure batches are not empty + batch = [] + while len(batch) < self.batch_size: + idx: TIndex = self.perms[idx_raw % self.perms.shape[0]] + idx_raw += 1 + + time_win_source = self.time_window_handler.window(idx) + + # Sample masking strategy once per batch item + if hasattr(self.tokenizer, "masker"): + self.tokenizer.masker.set_batch_strategy() + + streams_data: list[StreamData] = [] + + # for all streams + for stream_info, stream_ds in zip(self.streams, self.streams_datasets, strict=True): + stream_data = StreamData( + idx, forecast_dt + self.forecast_offset, self.num_healpix_cells + ) + + # collect all targets for current stream + rdata: IOReaderData = collect_datasources(stream_ds, 
idx, "source") + + if rdata.is_empty(): + # work around for https://github.com/pytorch/pytorch/issues/158719 + # create non-empty mean data instead of empty tensor + rdata = spoof( + self.healpix_level, + time_win_source.start, + stream_ds[0].get_geoinfo_size(), + stream_ds[0].mean[stream_ds[0].source_idx], + ) + stream_data.source_is_spoof = True + + # preprocess data for model input + (ss_cells, ss_lens, ss_centroids) = self.tokenizer.batchify_source( + stream_info, + readerdata_to_torch(rdata), + (time_win_source.start, time_win_source.end), + stream_ds[0].normalize_coords, + ) + + # TODO: rdata only be collected in validation mode + stream_data.add_source(rdata, ss_lens, ss_cells, ss_centroids) + + # target + + # collect for all forecast steps + for fstep in range( + self.forecast_offset, self.forecast_offset + forecast_dt + 1 + ): + step_forecast_dt = idx + (self.forecast_delta_hrs * fstep) // self.step_hrs + time_win_target = self.time_window_handler.window(step_forecast_dt) + + # collect all targets for current stream + rdata: IOReaderData = collect_datasources( + stream_ds, step_forecast_dt, "target" + ) + + if rdata.is_empty(): + # work around for https://github.com/pytorch/pytorch/issues/158719 + # create non-empty mean data instead of empty tensor + rdata = spoof( + self.healpix_level, + time_win_target.start, + stream_ds[0].get_geoinfo_size(), + stream_ds[0].mean[stream_ds[0].target_idx], + ) + stream_data.target_is_spoof = True + + # preprocess data for model input + (tt_cells, tc, tt_c, tt_t) = self.tokenizer.batchify_target( + stream_info, + self.sampling_rate_target, + readerdata_to_torch(rdata), + (time_win_target.start, time_win_target.end), + ) + + stream_data.add_target(fstep, tt_cells, tc, tt_c, tt_t) + + # merge inputs for sources and targets for current stream + streams_data += [stream_data] + + # Reset masking strategy for next batch item + if hasattr(self.tokenizer, "masker"): + self.tokenizer.masker.reset_batch_strategy() + + # skip completely empty batch item or when all targets are empty -> no grad + if not (all(s.empty() or s.target_empty() for s in streams_data)): + batch += [streams_data] + + # aggregated lens of tokens per cell + source_cell_lens = compute_source_cell_lens(batch) + + # compute offsets for scatter computation after embedding + batch = compute_offsets_scatter_embed(batch) + + # compute offsets and auxiliary data needed for prediction computation + # (info is not per stream so separate data structure) + target_coords_idx = compute_idxs_predict(self.forecast_offset + forecast_dt, batch) + + assert len(batch) == self.batch_size + yield (batch, source_cell_lens, target_coords_idx, forecast_dt) + + ################################################### + def __len__(self): + return self.len + + ################################################### + def worker_workset(self): + local_start, local_end = self.rank * self.len, (self.rank + 1) * self.len + + worker_info = torch.utils.data.get_worker_info() + + if worker_info is None: + assert self.world_size == 1, self.world_size + iter_start = 0 + iter_end = len(self) + + else: + # ensure the rng seed is fully unique across workers and mini_epochs + # the worker processes are generated as bit-wise copy of the "template" (the actual + # instance of the present class that is created) whenever __iter__ is started. This + # happens for each mini_epoch, for train and validation, and independently for each DDP + # worker. 
After the bit-wise copy, the rng seed needs to be made unique for + # DDP workers, loader process, mini_epoch. + dist = torch.distributed + self.data_loader_rng_seed *= ( + (((dist.get_rank() + 1) * 73) if dist.is_initialized() else 1) + * ((worker_info.id + 1) * 37) + * (self.mini_epoch + 13) + * 7 + ) + # split workload + per_worker = (local_end - local_start) // worker_info.num_workers + iter_start = local_start + worker_info.id * per_worker + iter_end = iter_start + per_worker + if worker_info.id + 1 == worker_info.num_workers: + iter_end = local_end + logger.info( + f"{self.rank}::{worker_info.id}" + + f" : dataset [{local_start},{local_end}) : [{iter_start},{iter_end})" + ) + + return iter_start, iter_end diff --git a/src/weathergen/datasets/normalizer.py b/src/weathergen/datasets/normalizer.py deleted file mode 100644 index 821c1f913..000000000 --- a/src/weathergen/datasets/normalizer.py +++ /dev/null @@ -1,130 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -import numpy as np - -from weathergen.datasets.utils import arc_alpha - - -class DataNormalizer : - - ################################################### - def __init__( self, stream_info, geoinfo_offset, stats_offset, ds, geoinfo_idx, data_idx, do) : - - # obs_id, year, day_of_year, day - self.geoinfo_offset = geoinfo_offset - self.stats_offset = stats_offset - - self.stream_info = stream_info - self.geoinfo_idx = np.array(geoinfo_idx) - self.data_idx = np.array(data_idx) - self.geoinfo_size = len(geoinfo_idx) - - self.source_chs = np.arange(len(data_idx)) - self.loss_chs = np.arange(len(data_idx)) - - self.mean = np.array(ds.properties['means'])[do:] - self.var = np.array(ds.properties['vars'])[do:] - - ################################################### - def normalize_data( self, data, with_offset=True) : - - go = self.geoinfo_size + self.geoinfo_offset - so = self.stats_offset - for i, ch in enumerate( self.data_idx) : - data[...,go+i] = (data[...,go+i] - self.mean[ch-so]) / (self.var[ch-so]**0.5) - - return data - - ################################################### - def denormalize_data( self, data, with_offset=True) : - - go = self.geoinfo_size + self.geoinfo_offset if with_offset else 0 - so = self.stats_offset - for i, ch in enumerate( self.data_idx) : - data[...,go+i] = (data[...,go+i] * (self.var[ch-so]**0.5)) + self.mean[ch-so] - - return data - - ################################################### - def normalize_coords( self, data, normalize_latlon=True) : - - so = self.stats_offset - - # TODO: geoinfo_offset should be derived from the code below and the corresponding code in - # multi_obs_data_sampler - # obs_id, year, day of the year, minute of the day - assert self.geoinfo_offset == 6 - data[...,0] /= 256. - data[...,1] /= 2100. - data[...,2] = data[...,2] / 365. - data[...,3] = data[...,3] / 1440. - data[...,4] = np.sin( data[...,4] / (12.*3600.) * 2.*np.pi) - data[...,5] = np.cos( data[...,5] / (12.*3600.) 
* 2.*np.pi) - - go = self.geoinfo_offset - for i, ch in enumerate( self.geoinfo_idx) : - if 0 == i : # lats - if normalize_latlon : - data[...,go+i] = np.sin( np.deg2rad( data[...,go+i])) - pass - elif 1 == i : # lons - if normalize_latlon : - data[...,go+i] = np.sin( 0.5 * np.deg2rad( data[...,go+i])) - else : - data[...,go+i] = (data[...,go+i] - self.mean[ch-so]) / ((self.var[ch-so]**0.5) if self.var[ch-so]>0. else 1.) - - return data - - ################################################### - def normalize_targets( self, data) : - - so = self.stats_offset - - # TODO: geoinfo_offset should be derived from the code below and the corresponding code in - # multi_obs_data_sampler - # obs_id, year, day of the year, minute of the day - assert self.geoinfo_offset == 6 - data[...,0] /= 256. - data[...,1] = np.sin( data[...,1] / (12.*3600.) * 2.*np.pi) - data[...,2] = np.cos( data[...,2] / (12.*3600.) * 2.*np.pi) - data[...,3] = np.sin( data[...,3] / (12.*3600.) * 2.*np.pi) - data[...,4] = np.cos( data[...,4] / (12.*3600.) * 2.*np.pi) - data[...,5] = np.sin( data[...,5] / (12.*3600.) * 2.*np.pi) - - go = self.geoinfo_offset - for i, ch in enumerate( self.geoinfo_idx) : - if i > 1 : # skip lat/lon - data[...,go+i] = (data[...,go+i] - self.mean[ch-so]) / ((self.var[ch-so]**0.5) if self.var[ch-so]>0. else 1.) - - return data - - ################################################### - def denormalize_coords( self, data) : - - # obs_id, year, day of the year, minute of the day - assert self.geoinfo_offset == 6 - data[...,0] *= 256. - data[...,1] = (arc_alpha( data[...,1], data[...,2]) / (2.*np.pi)) * (12.*3600.) - data[...,2] = data[...,1] - data[...,3] = data[...,1] - data[...,4] = data[...,1] - data[...,5] = data[...,1] - - # go = self.geoinfo_offset - # for i, ch in enumerate( self.geoinfo_idx) : - # if 0 == i : # lats - # data[...,go+i] = torch.rad2deg( torch.arcsin( data[...,go+i])) - # elif 1 == i : # lons - # data[...,go+i] = torch.rad2deg( 2.0 * torch.arcsin( data[...,go+i])) - # else : - # data[...,go+i] = (data[...,go+i] * (self.var[ch]**0.5)) + self.mean[ch] - - return data - \ No newline at end of file diff --git a/src/weathergen/datasets/obs_dataset.py b/src/weathergen/datasets/obs_dataset.py deleted file mode 100644 index ad1fd2709..000000000 --- a/src/weathergen/datasets/obs_dataset.py +++ /dev/null @@ -1,236 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
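As context for the DataNormalizer removed above, a minimal sketch of its 12-hour sin/cos time encoding (values are illustrative, not part of the patch):

    import numpy as np

    seconds = np.array([0.0, 10800.0, 21600.0])      # 0 h, 3 h, 6 h into the window
    phase = seconds / (12.0 * 3600.0) * 2.0 * np.pi
    enc_sin, enc_cos = np.sin(phase), np.cos(phase)
    # the (sin, cos) pair is unique over the 12 h period; the removed
    # denormalize_coords inverts it with the arc_alpha helper to recover the phase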
- -import datetime - -import numpy as np -import zarr -import code - - -class ObsDataset(): - - def __init__( - self, - filename: str, - start: int, - end: int, - len_hrs: int, - step_hrs: int = None, - normalize: bool = True, - select: list[str] = None, - ) -> None: - - self.normalize = normalize - self.filename = filename - self.z = zarr.open( filename, mode="r") - self.data = self.z["data"] - self.dt = self.z["dates"] # datetime only - self.hrly_index = self.z["idx_197001010000_1"] - self.colnames = self.data.attrs["colnames"] - self.len_hrs = len_hrs - self.step_hrs = step_hrs if step_hrs else len_hrs - - # self.selected_colnames = self.colnames - # self.selected_cols_idx = np.arange(len(self.colnames)) - for i, col in enumerate( reversed( self.colnames)) : - # if col[:9] == 'obsvalue_' : - if not (col[:4] == 'sin_' or col[:4] == 'cos_') : - break - self.selected_colnames = self.colnames[ : len(self.colnames)-i ] - self.selected_cols_idx = np.arange(len(self.colnames))[ : len(self.colnames)-i ] - - # Create index for samples - self._setup_sample_index(start, end, self.len_hrs, self.step_hrs) - # assert len(self.indices_start) == len(self.indices_end) - - self._load_properties() - - if select: - self.select(select) - - def __getitem__( self, idx: int) -> tuple : - - start_row = self.indices_start[idx] - end_row = self.indices_end[idx] - - data = self.data.oindex[start_row:end_row, self.selected_cols_idx] - datetimes = self.dt[start_row:end_row][:,0] - - return (data, datetimes) - - def __len__(self) -> int: - - return min( len(self.indices_start), len(self.indices_end)) - - def select(self, cols_list: list[str]) -> None: - """ - Allow user to specify which columns they want to access. - Get functions only returned for these specified columns. - """ - self.selected_colnames = cols_list - self.selected_cols_idx = np.array( - [self.colnames.index(item) for item in cols_list] - ) - - def time_window(self, idx: int) -> tuple[np.datetime64, np.datetime64]: - """ - Returns a tuple of datetime objects describing the start and end times of the sample at position idx. - """ - - if idx < 0: - idx = len(self) + idx - - time_start = self.start_dt + datetime.timedelta( - hours=( int(idx * self.step_hrs)), seconds=1 - ) - time_end = min( - self.start_dt - + datetime.timedelta(hours=( int(idx * self.step_hrs + self.len_hrs))), - self.end_dt, - ) - - return (np.datetime64(time_start), np.datetime64(time_end)) - - def first_sample_with_data(self) -> int: - """ - Returns the position of the first sample which contains data. - """ - return ( - int(np.nonzero(self.indices_end)[0][0]) - if self.indices_end[-1] != self.indices_end[0] - else None - ) - - def last_sample_with_data(self) -> int: - """ - Returns the position of the last sample which contains data. 
- """ - if self.indices_end[-1] == self.indices_end[0]: - last_sample = None - else: - last_sample = int( - np.where( - np.diff(np.append(self.indices_end, self.indices_end[-1])) > 0 - )[0][-1] - + 1 - ) - - return last_sample - - def _setup_sample_index( - self, start: int, end: int, len_hrs: int, step_hrs: int - ) -> None: - """ - Dataset is divided into samples; - - each n_hours long - - sample 0 starts at start (yyyymmddhhmm) - - index array has one entry for each sample; contains the index of the first row - containing data for that sample - """ - - base_yyyymmddhhmm = 197001010000 - - assert start > base_yyyymmddhhmm, ( - f"Abort: ObsDataset sample start (yyyymmddhhmm) must be greater than {base_yyyymmddhhmm}\n" - f" Current value: {start}" - ) - - # Derive new index based on hourly backbone index - format_str = "%Y%m%d%H%M%S" - base_dt = datetime.datetime.strptime(str(base_yyyymmddhhmm), format_str) - self.start_dt = datetime.datetime.strptime(str(start), format_str) - self.end_dt = datetime.datetime.strptime(str(end), format_str) - - # Calculate the number of hours between start of hourly base index and the requested sample index - diff_in_hours_start = int((self.start_dt - base_dt).total_seconds() / 3600) - diff_in_hours_end = int((self.end_dt - base_dt).total_seconds() / 3600) - - end_range_1 = min(diff_in_hours_end, self.hrly_index.shape[0] - 1) - self.indices_start = self.hrly_index[diff_in_hours_start:end_range_1:step_hrs] - - end_range_2 = min( - diff_in_hours_end + len_hrs, self.hrly_index.shape[0] - 1 - ) # handle beyond end of data range safely - self.indices_end = ( - self.hrly_index[diff_in_hours_start + len_hrs : end_range_2 : step_hrs] - 1 - ) - # Handle situations where the requested dataset span goes beyond the hourly index stored in the zarr - if diff_in_hours_end > (self.hrly_index.shape[0] - 1): - if diff_in_hours_start > (self.hrly_index.shape[0] - 1): - n = (diff_in_hours_end - diff_in_hours_start) // step_hrs - self.indices_start = np.zeros(n, dtype=int) - self.indices_end = np.zeros(n, dtype=int) - else: - self.indices_start = np.append( - self.indices_start, - np.ones( - (diff_in_hours_end - self.hrly_index.shape[0] - 1) // step_hrs, - dtype=int - ) - * self.indices_start[-1], - ) - self.indices_end = np.append( - self.indices_end, - np.ones( - (diff_in_hours_end - self.hrly_index.shape[0] - 1) // step_hrs, - dtype=int - ) - * self.indices_end[-1], - ) - - # Prevent -1 in samples before the we have data - self.indices_end = np.maximum(self.indices_end, 0) - - if self.indices_end.shape != self.indices_start.shape: - self.indices_end = np.append(self.indices_end, self.indices_end[-1]) - - # If end (yyyymmddhhmm) is not a multiple of len_hrs - # truncate the last sample so that it doesn't go beyond the requested dataset end date - self.indices_end = np.minimum(self.indices_end, self.hrly_index[end_range_1]) - - def _load_properties(self) -> None: - - self.properties = {} - - self.properties["means"] = self.data.attrs["means"] - self.properties["vars"] = self.data.attrs["vars"] - # self.properties["data_idxs"] = self.data.attrs["data_idxs"] - self.properties["obs_id"] = self.data.attrs["obs_id"] - -#################################################################################################### -if __name__ == "__main__": - - zarrpath = config.zarrpath - zarrpath = '/lus/h2resw01/fws4/lb/project/ai-ml/observations/zarr/v0.2' - - # # polar orbiting satellites - # d1 = ObsDataset( zarrpath, '34001', 201301010000, 202112310000, 24) - # d2 = ObsDataset( zarrpath, 
'34002', 201301010000, 202112310000, 24) - # d3 = ObsDataset( zarrpath, '1009', 201301010000, 202112310000, 24) - # d4 = ObsDataset( zarrpath, '11002', 201301010000, 202112310000, 24) - # d5 = ObsDataset( zarrpath, '1001', 201301010000, 202112310000, 24) - # d6 = ObsDataset( zarrpath, '1004', 201301010000, 202112310000, 24) - # d7 = ObsDataset( zarrpath, '1007', 201301010000, 202112310000, 24) - - # # geostationary satellites - # d1 = ObsDataset( zarrpath, '4023', 201301010000, 202112310000, 6, - # ) - - # conventional obs - d1 = ObsDataset( zarrpath + '/16002.zarr', 201301010000, 202112310000, 24) - d2 = ObsDataset( zarrpath + '/16045.zarr', 201301010000, 202112310000, 24) - d3 = ObsDataset( zarrpath + '/bufr_ship_synop_ofb_ea_0001.zarr', 201301010000, 202112310000, 24) - d4 = ObsDataset( zarrpath + '/bufr_land_synop_ofb_ea_0001.zarr', 201301010000, 202112310000, 24) - - d = d1 - code.interact( local=locals()) - - sample = d[0] - print(sample.shape) diff --git a/src/weathergen/datasets/stream_data.py b/src/weathergen/datasets/stream_data.py new file mode 100644 index 000000000..450d5e96d --- /dev/null +++ b/src/weathergen/datasets/stream_data.py @@ -0,0 +1,309 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +import astropy_healpix as hp +import numpy as np +import torch + +from weathergen.common.io import IOReaderData + + +class StreamData: + """ + StreamData object that encapsulates all data the model ingests for one batch item + for one stream. 
+ """ + + def __init__(self, idx: int, forecast_steps: int, healpix_cells: int) -> None: + """ + StreamData object + + Parameters + ---------- + forecast_steps : int + Number of forecast steps + healpix_cells : int + Number of healpix cells for source + + Returns + ------- + None + """ + + self.mask_value = 0.0 + + self.forecast_steps = forecast_steps + self.healpix_cells = healpix_cells + + self.source_is_spoof = False + self.target_is_spoof = False + + # initialize empty members + self.sample_idx = idx + self.target_coords = [torch.tensor([]) for _ in range(forecast_steps + 1)] + self.target_coords_raw = [[] for _ in range(forecast_steps + 1)] + self.target_times_raw = [[] for _ in range(forecast_steps + 1)] + # this is not directly used but to precompute index in compute_idxs_predict() + self.target_coords_lens = [ + torch.tensor([0 for _ in range(self.healpix_cells)]) for _ in range(forecast_steps + 1) + ] + self.target_tokens = [torch.tensor([]) for _ in range(forecast_steps + 1)] + self.target_tokens_lens = [ + torch.tensor([0 for _ in range(self.healpix_cells)]) for _ in range(forecast_steps + 1) + ] + + # source tokens per cell + self.source_tokens_cells = [] + # length of source tokens per cell (without padding) + self.source_tokens_lens = [] + self.source_centroids = [] + # unprocessed source (for logging) + self.source_raw = [] + # auxiliary data for scatter operation that changes from stream-centric to cell-centric + # processing after embedding + self.source_idxs_embed = torch.tensor([]) + self.source_idxs_embed_pe = torch.tensor([]) + + def to_device(self, device: str) -> None: + """ + Move data to GPU + + Parameters + ---------- + device : str + Device the data is moved/mapped to. + + Returns + ------- + None + """ + + self.source_tokens_cells = self.source_tokens_cells.to(device, non_blocking=True) + self.source_centroids = self.source_centroids.to(device, non_blocking=True) + self.source_tokens_lens = self.source_tokens_lens.to(device, non_blocking=True) + + self.target_coords = [t.to(device, non_blocking=True) for t in self.target_coords] + self.target_tokens = [t.to(device, non_blocking=True) for t in self.target_tokens] + self.target_tokens_lens = [t.to(device, non_blocking=True) for t in self.target_tokens_lens] + + self.source_idxs_embed = self.source_idxs_embed.to(device, non_blocking=True) + self.source_idxs_embed_pe = self.source_idxs_embed_pe.to(device, non_blocking=True) + + return self + + def add_empty_source(self, source: IOReaderData) -> None: + """ + Add an empty source for an input. + + Parameters + ---------- + None + + Returns + ------- + None + """ + + source = spoof(source) + self.source_raw += [source] + self.source_tokens_lens += [torch.ones([self.healpix_cells], dtype=torch.int32)] + self.source_tokens_cells += [torch.tensor([])] + self.source_centroids += [torch.tensor([])] + + def add_empty_target(self, fstep: int) -> None: + """ + Add an empty target for an input. 
+
+        Parameters
+        ----------
+        fstep : int
+            forecast step
+
+        Returns
+        -------
+        None
+        """
+
+        self.target_tokens[fstep] += [torch.tensor([], dtype=torch.int32)]
+        self.target_tokens_lens[fstep] += [torch.zeros([self.healpix_cells], dtype=torch.int32)]
+        self.target_coords[fstep] += [torch.zeros((0, 105)) for _ in range(self.healpix_cells)]
+        self.target_coords_lens[fstep] += [torch.zeros([self.healpix_cells], dtype=torch.int32)]
+        self.target_coords_raw[fstep] += [torch.tensor([]) for _ in range(self.healpix_cells)]
+        self.target_times_raw[fstep] += [
+            np.array([], dtype="datetime64[ns]") for _ in range(self.healpix_cells)
+        ]
+
+    def add_source(
+        self, ss_raw: IOReaderData, ss_lens: torch.tensor, ss_cells: list, ss_centroids: list
+    ) -> None:
+        """
+        Add data for source for one input.
+
+        Parameters
+        ----------
+        ss_raw : IOReaderData( dataclass containing coords, geoinfos, data, and datetimes )
+        ss_lens : torch.tensor( number of healpix cells )
+        ss_cells : list( number of healpix cells )
+            [ torch.tensor( tokens per cell, token size, number of channels) ]
+        ss_centroids : list( number of healpix cells )
+            [ torch.tensor( for source , 5) ]
+
+        Returns
+        -------
+        None
+        """
+
+        self.source_raw = ss_raw
+        self.source_tokens_lens = ss_lens
+        self.source_tokens_cells = torch.cat(ss_cells)
+        self.source_centroids = torch.cat(ss_centroids)
+
+        idx = torch.isnan(self.source_tokens_cells)
+        self.source_tokens_cells[idx] = self.mask_value
+
+    def add_target(
+        self,
+        fstep: int,
+        targets: list,
+        target_coords: torch.tensor,
+        target_coords_raw: torch.tensor,
+        times_raw: torch.tensor,
+    ) -> None:
+        """
+        Add data for target for one input.
+
+        Parameters
+        ----------
+        fstep : int
+            forecast step
+        targets : list( number of healpix cells )
+            [ torch.tensor( num tokens, channels) ]
+            Target data for loss computation
+        target_coords : list( number of healpix cells )
+            [ torch.tensor( points per cell, 105) ]
+            target coordinates
+        target_coords_raw : list( number of healpix cells )
+            [ torch.tensor( points per cell, coords size) ]
+            unnormalized target coordinates
+        times_raw : list( number of healpix cells )
+            [ np.ndarray( points per cell ) ]
+            absolute target times
+
+        Returns
+        -------
+        None
+        """
+
+        self.target_tokens[fstep] = torch.cat(targets)
+        self.target_coords[fstep] = torch.cat(target_coords)
+        self.target_times_raw[fstep] = np.concatenate(times_raw)
+        self.target_coords_raw[fstep] = torch.cat(target_coords_raw)
+
+        tc = target_coords
+        self.target_coords_lens[fstep] = torch.tensor(
+            [len(f) for f in tc] if len(tc) > 1 else self.target_coords_lens[fstep],
+            dtype=torch.int,
+        )
+        self.target_tokens_lens[fstep] = torch.tensor(
+            [len(f) for f in targets] if len(targets) > 1 else self.target_tokens_lens[fstep],
+            dtype=torch.int,
+        )
+
+    def target_empty(self) -> bool:
+        """
+        Test if target for stream is empty
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        boolean
+            True if target is empty for stream, else False
+        """
+
+        # cat over forecast steps
+        return torch.cat(self.target_tokens_lens).sum() == 0
+
+    def source_empty(self) -> bool:
+        """
+        Test if source for stream is empty
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        boolean
+            True if source is empty for stream, else False
+        """
+
+        return self.source_tokens_lens.sum() == 0
+
+    def empty(self):
+        """
+        Test if stream (both source and target) is empty
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        boolean
+            True if both source and target are empty for the stream, else False
+        """
+
+        return self.source_empty() and self.target_empty()
+
+    def
is_spoof(self) -> bool:
+        """
+        Return True if either the source or the target is spoofed.
+        """
+        return self.source_is_spoof or self.target_is_spoof
+
+
+def spoof(healpix_level: int, datetime, geoinfo_size, mean_of_data) -> IOReaderData:
+    """
+    Create a spoofed IOReaderData instance with one data point per healpix cell,
+    placed at the cell centers, with zeroed geoinfos and the provided per-channel
+    means as data. Used as a stand-in when a stream window contains no data.
+    """
+
+    dx = 0.5
+    dy = 0.5
+    num_healpix_cells = 12 * 4**healpix_level
+    lons, lats = hp.healpix_to_lonlat(
+        np.arange(0, num_healpix_cells), 2**healpix_level, dx=dx, dy=dy, order="nested"
+    )
+    coords = np.stack([lats.deg, lons.deg], axis=-1, dtype=np.float32)
+    geoinfos = np.zeros((coords.shape[0], geoinfo_size), dtype=np.float32)
+
+    data = np.expand_dims(mean_of_data.astype(np.float32), axis=0).repeat(coords.shape[0], axis=0)
+    datetimes = np.array(datetime).repeat(coords.shape[0])
+
+    n_datapoints = len(data)
+
+    assert coords.shape == (n_datapoints, 2), (
+        "number of datapoints do not match data",
+        coords.shape,
+        (n_datapoints, 2),
+    )
+    assert geoinfos.shape[0] == n_datapoints, (
+        "number of datapoints do not match data",
+        geoinfos.shape,
+        (n_datapoints, geoinfo_size),
+    )
+    assert datetimes.shape[0] == n_datapoints, (
+        "number of datapoints do not match data",
+        datetimes.shape,
+        (n_datapoints,),
+    )
+
+    return IOReaderData(coords, geoinfos, data, datetimes)
diff --git a/src/weathergen/datasets/tokenizer.py b/src/weathergen/datasets/tokenizer.py
new file mode 100644
index 000000000..a059d6b77
--- /dev/null
+++ b/src/weathergen/datasets/tokenizer.py
@@ -0,0 +1,142 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+import warnings
+
+import astropy_healpix as hp
+import numpy as np
+import torch
+
+from weathergen.datasets.utils import (
+    healpix_verts_rots,
+    r3tos2,
+)
+
+
+class Tokenizer:
+    """
+    Base class for tokenizers.
+    """
+
+    def __init__(self, healpix_level: int):
+        ref = torch.tensor([1.0, 0.0, 0.0])
+
+        self.hl_source = healpix_level
+        self.hl_target = healpix_level
+
+        self.num_healpix_cells_source = 12 * 4**self.hl_source
+        self.num_healpix_cells_target = 12 * 4**self.hl_target
+
+        self.size_time_embedding = 6
+
+        verts00, verts00_rots = healpix_verts_rots(self.hl_source, 0.0, 0.0)
+        verts10, verts10_rots = healpix_verts_rots(self.hl_source, 1.0, 0.0)
+        verts11, verts11_rots = healpix_verts_rots(self.hl_source, 1.0, 1.0)
+        verts01, verts01_rots = healpix_verts_rots(self.hl_source, 0.0, 1.0)
+        vertsmm, vertsmm_rots = healpix_verts_rots(self.hl_source, 0.5, 0.5)
+        self.hpy_verts = [
+            verts00.to(torch.float32),
+            verts10.to(torch.float32),
+            verts11.to(torch.float32),
+            verts01.to(torch.float32),
+            vertsmm.to(torch.float32),
+        ]
+        self.hpy_verts_rots_source = [
+            verts00_rots.to(torch.float32),
+            verts10_rots.to(torch.float32),
+            verts11_rots.to(torch.float32),
+            verts01_rots.to(torch.float32),
+            vertsmm_rots.to(torch.float32),
+        ]
+
+        verts00, verts00_rots = healpix_verts_rots(self.hl_target, 0.0, 0.0)
+        verts10, verts10_rots = healpix_verts_rots(self.hl_target, 1.0, 0.0)
+        verts11, verts11_rots = healpix_verts_rots(self.hl_target, 1.0, 1.0)
+        verts01, verts01_rots = healpix_verts_rots(self.hl_target, 0.0, 1.0)
+        vertsmm, vertsmm_rots = healpix_verts_rots(self.hl_target, 0.5, 0.5)
+        self.hpy_verts = [
+            verts00.to(torch.float32),
+            verts10.to(torch.float32),
+            verts11.to(torch.float32),
+            verts01.to(torch.float32),
+            vertsmm.to(torch.float32),
+        ]
+        self.hpy_verts_rots_target = [
+            verts00_rots.to(torch.float32),
+            verts10_rots.to(torch.float32),
+            verts11_rots.to(torch.float32),
+            verts01_rots.to(torch.float32),
+            vertsmm_rots.to(torch.float32),
+        ]
+
+        transforms = [
+            ([verts10, verts11, verts01, vertsmm], verts00_rots),
+            ([verts00, verts11, verts01, vertsmm], verts10_rots),
+            ([verts00, verts10, verts01, vertsmm], verts11_rots),
+            ([verts00, verts11, verts10, vertsmm], verts01_rots),
+            ([verts00, verts10, verts11, verts01], vertsmm_rots),
+        ]
+
+        self.verts_local = []
+        for _verts, rot in transforms:
+            # Compute local coordinates
+            verts = torch.stack(_verts)
+            # shape: (4, num_cells, 3) -> (num_cells, 4, 3)
+            verts = verts.transpose(0, 1)
+            # Batch multiplication by the 3x3 rotation matrices.
+            # shape: (num_cells, 3, 3) @ (num_cells, 3, 4) -> (num_cells, 3, 4)
+            # Needs to transpose first to then transpose back.
+ t1 = torch.bmm(rot, verts.transpose(-1, -2)).transpose(-2, -1) + t2 = ref - t1 + self.verts_local.append(t2.flatten(1, 2)) + + self.hpy_verts_local_target = torch.stack(self.verts_local).transpose(0, 1) + + # add local coords wrt to center of neighboring cells + # (since the neighbors are used in the prediction) + num_healpix_cells = 12 * 4**self.hl_target + with warnings.catch_warnings(action="ignore"): + temp = hp.neighbours( + np.arange(num_healpix_cells), 2**self.hl_target, order="nested" + ).transpose() + # fix missing nbors with references to self + for i, row in enumerate(temp): + temp[i][row == -1] = i + self.hpy_nctrs_target = ( + vertsmm[temp.flatten()] + .reshape((num_healpix_cells, 8, 3)) + .transpose(1, 0) + .to(torch.float32) + ) + + def compute_source_centroids(self, source_tokens_cells: list[torch.Tensor]) -> torch.Tensor: + source_means = [ + ( + self.hpy_verts[-1][i].unsqueeze(0).repeat(len(s), 1) + if len(s) > 0 + else torch.tensor([]) + ) + for i, s in enumerate(source_tokens_cells) + ] + source_means_lens = [len(s) for s in source_means] + # merge and split to vectorize computations + source_means = torch.cat(source_means) + # TODO: precompute also source_means_r3 and then just cat + source_centroids = torch.cat( + [source_means.to(torch.float32), r3tos2(source_means).to(torch.float32)], -1 + ) + source_centroids = torch.split(source_centroids, source_means_lens) + + return source_centroids + + def get_size_time_embedding(self) -> int: + """ + Get size of time embedding + """ + return self.size_time_embedding diff --git a/src/weathergen/datasets/tokenizer_forecast.py b/src/weathergen/datasets/tokenizer_forecast.py new file mode 100644 index 000000000..c52d77790 --- /dev/null +++ b/src/weathergen/datasets/tokenizer_forecast.py @@ -0,0 +1,149 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
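
As context for the rotations precomputed in the Tokenizer base class above: healpix_verts_rots returns, per cell, a vertex position in R^3 together with a rotation that maps that vertex onto the reference direction (1, 0, 0); this is the invariant the removed __main__ self-test in utils.py (further below in this patch) used to check. A standalone sketch (not part of the patch), assuming weathergen is importable:

import torch

from weathergen.datasets.utils import healpix_verts_rots

hl = 1
num_cells = 12 * 4**hl
verts, rots = healpix_verts_rots(hl, 0.5, 0.5)  # cell centers and their rotations
assert verts.shape == (num_cells, 3) and rots.shape == (num_cells, 3, 3)

# Rotating each center by its own matrix should land on (1, 0, 0).
aligned = torch.bmm(rots, verts.unsqueeze(-1)).squeeze(-1)
ref = torch.tensor([1.0, 0.0, 0.0], dtype=aligned.dtype)
assert torch.allclose(aligned, ref.expand_as(aligned), atol=1e-6)
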
+ +from functools import partial + +import numpy as np +import torch + +from weathergen.common.io import IOReaderData +from weathergen.datasets.tokenizer import Tokenizer +from weathergen.datasets.tokenizer_utils import ( + encode_times_source, + encode_times_target, + hpy_cell_splits, + tokenize_window_space, + tokenize_window_spacetime, +) +from weathergen.datasets.utils import ( + get_target_coords_local_ffast, +) + + +class TokenizerForecast(Tokenizer): + def reset_rng(self, rng) -> None: + """ + Reset rng after mini_epoch to ensure proper randomization + """ + self.rng = rng + + def batchify_source( + self, + stream_info: dict, + rdata: IOReaderData, + time_win: tuple, + normalize_coords, + ): + token_size = stream_info["token_size"] + is_diagnostic = stream_info.get("diagnostic", False) + tokenize_spacetime = stream_info.get("tokenize_spacetime", False) + + tokenize_window = partial( + tokenize_window_spacetime if tokenize_spacetime else tokenize_window_space, + time_win=time_win, + token_size=token_size, + hl=self.hl_source, + hpy_verts_rots=self.hpy_verts_rots_source[-1], + n_coords=normalize_coords, + enc_time=encode_times_source, + ) + + source_tokens_cells = [torch.tensor([])] + source_centroids = [torch.tensor([])] + source_tokens_lens = torch.zeros([self.num_healpix_cells_source], dtype=torch.int32) + + if is_diagnostic or rdata.data.shape[1] == 0 or len(rdata.data) < 2: + return (source_tokens_cells, source_tokens_lens, source_centroids) + + # TODO: properly set stream_id; don't forget to normalize + source_tokens_cells = tokenize_window( + 0, + rdata.coords, + rdata.geoinfos, + rdata.data, + rdata.datetimes, + ) + + source_tokens_cells = [ + torch.stack(c) if len(c) > 0 else torch.tensor([]) for c in source_tokens_cells + ] + + source_tokens_lens = torch.tensor([len(s) for s in source_tokens_cells], dtype=torch.int32) + if source_tokens_lens.sum() > 0: + source_centroids = self.compute_source_centroids(source_tokens_cells) + + return (source_tokens_cells, source_tokens_lens, source_centroids) + + def batchify_target( + self, + stream_info: dict, + sampling_rate_target: float, + rdata: IOReaderData, + time_win: tuple, + ): + target_tokens = torch.zeros([self.num_healpix_cells_target], dtype=torch.int32) + target_coords = torch.zeros([self.num_healpix_cells_target], dtype=torch.int32) + target_tokens_lens = torch.zeros([self.num_healpix_cells_target], dtype=torch.int32) + + sampling_rate_target = stream_info.get("sampling_rate_target", sampling_rate_target) + if sampling_rate_target < 1.0: + mask = self.rng.uniform(0.0, 1.0, rdata.data.shape[0]) < sampling_rate_target + rdata.coords = rdata.coords[mask] + rdata.geoinfos = rdata.geoinfos[mask] + rdata.data = rdata.data[mask] + rdata.datetimes = rdata.datetimes[mask] + + # TODO: currently treated as empty to avoid special case handling + if len(rdata.data) < 2: + return (target_tokens, target_coords, torch.tensor([]), torch.tensor([])) + + # compute indices for each cell + hpy_idxs_ord_split, _, _, _ = hpy_cell_splits(rdata.coords, self.hl_target) + + # TODO: expose parameter + with_perm_target = True + if with_perm_target: + hpy_idxs_ord_split = [ + idx[self.rng.permutation(len(idx))[: int(len(idx))]] for idx in hpy_idxs_ord_split + ] + + # helper variables to split according to cells + idxs_ord = np.concatenate(hpy_idxs_ord_split) + ll = np.cumsum(np.array([len(a) for a in hpy_idxs_ord_split]))[:-1] + + # compute encoding of time + times_reordered = rdata.datetimes[idxs_ord] + times_reordered_enc = 
encode_times_target(times_reordered, time_win) + + # reorder and split all relevant information based on cells + target_tokens = np.split(rdata.data[idxs_ord], ll) + coords_reordered = rdata.coords[idxs_ord] + target_coords = np.split(coords_reordered, ll) + target_coords_raw = np.split(coords_reordered, ll) + target_geoinfos = np.split(rdata.geoinfos[idxs_ord], ll) + target_times_raw = np.split(times_reordered, ll) + target_times = np.split(times_reordered_enc, ll) + + target_tokens_lens = torch.tensor([len(s) for s in target_tokens], dtype=torch.int32) + + # compute encoding of target coordinates used in prediction network + if target_tokens_lens.sum() > 0: + target_coords = get_target_coords_local_ffast( + self.hl_target, + target_coords, + target_geoinfos, + target_times, + self.hpy_verts_rots_target, + self.hpy_verts_local_target, + self.hpy_nctrs_target, + ) + target_coords.requires_grad = False + target_coords = list(target_coords.split(target_tokens_lens.tolist())) + + return (target_tokens, target_coords, target_coords_raw, target_times_raw) diff --git a/src/weathergen/datasets/tokenizer_masking.py b/src/weathergen/datasets/tokenizer_masking.py new file mode 100644 index 000000000..8cc3de2f5 --- /dev/null +++ b/src/weathergen/datasets/tokenizer_masking.py @@ -0,0 +1,253 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +from functools import partial + +import numpy as np +import torch + +from weathergen.common.io import IOReaderData +from weathergen.datasets.masking import Masker +from weathergen.datasets.tokenizer import Tokenizer +from weathergen.datasets.tokenizer_utils import ( + arc_alpha, + encode_times_source, + encode_times_target, + tokenize_window_space, + tokenize_window_spacetime, +) +from weathergen.datasets.utils import ( + get_target_coords_local_ffast, +) + + +class TokenizerMasking(Tokenizer): + def __init__(self, healpix_level: int, masker: Masker): + super().__init__(healpix_level) + self.masker = masker + + def reset_rng(self, rng) -> None: + """ + Reset rng after mini_epoch to ensure proper randomization + """ + self.masker.reset_rng(rng) + self.rng = rng + + def batchify_source( + self, + stream_info: dict, + rdata: IOReaderData, + time_win: tuple, + normalize_coords, # dataset + ): + token_size = stream_info["token_size"] + is_diagnostic = stream_info.get("diagnostic", False) + tokenize_spacetime = stream_info.get("tokenize_spacetime", False) + + tokenize_window = partial( + tokenize_window_spacetime if tokenize_spacetime else tokenize_window_space, + time_win=time_win, + token_size=token_size, + hl=self.hl_source, + hpy_verts_rots=self.hpy_verts_rots_source[-1], + n_coords=normalize_coords, + enc_time=encode_times_source, + ) + + self.token_size = token_size + + # return empty if there is no data or we are in diagnostic mode + if is_diagnostic or rdata.data.shape[1] == 0 or len(rdata.data) < 2: + source_tokens_cells = [torch.tensor([])] + source_tokens_lens = torch.zeros([self.num_healpix_cells_source], dtype=torch.int32) + source_centroids = [torch.tensor([])] + return (source_tokens_cells, source_tokens_lens, source_centroids) + + # tokenize all data first + tokenized_data = tokenize_window( + 0, 
+ rdata.coords, + rdata.geoinfos, + rdata.data, + rdata.datetimes, + ) + + tokenized_data = [ + torch.stack(c) if len(c) > 0 else torch.tensor([]) for c in tokenized_data + ] + + # Use the masker to get source tokens and the selection mask for the target + source_tokens_cells = self.masker.mask_source( + tokenized_data, rdata.coords, rdata.geoinfos, rdata.data + ) + + source_tokens_lens = torch.tensor([len(s) for s in source_tokens_cells], dtype=torch.int32) + if source_tokens_lens.sum() > 0: + source_centroids = self.compute_source_centroids(source_tokens_cells) + else: + source_centroids = torch.tensor([]) + + return (source_tokens_cells, source_tokens_lens, source_centroids) + + def batchify_target( + self, + stream_info: dict, + sampling_rate_target: float, + rdata: IOReaderData, + time_win: tuple, + ): + token_size = stream_info["token_size"] + tokenize_spacetime = stream_info.get("tokenize_spacetime", False) + max_num_targets = stream_info.get("max_num_targets", -1) + + target_tokens, target_coords = torch.tensor([]), torch.tensor([]) + target_tokens_lens = torch.zeros([self.num_healpix_cells_target], dtype=torch.int32) + + # target is empty + if len(self.masker.perm_sel) == 0: + return (target_tokens, target_coords, torch.tensor([]), torch.tensor([])) + + # identity function + def id(arg): + return arg + + # set tokenization function, no normalization of coords + tokenize_window = partial( + tokenize_window_spacetime if tokenize_spacetime else tokenize_window_space, + time_win=time_win, + token_size=token_size, + hl=self.hl_source, + hpy_verts_rots=self.hpy_verts_rots_source[-1], + n_coords=id, + enc_time=encode_times_target, + pad_tokens=False, + local_coords=False, + ) + + # tokenize + target_tokens_cells = tokenize_window( + 0, + rdata.coords, + rdata.geoinfos, + rdata.data, + rdata.datetimes, + ) + + target_tokens = self.masker.mask_target( + target_tokens_cells, rdata.coords, rdata.geoinfos, rdata.data + ) + + target_tokens_lens = [len(t) for t in target_tokens] + total_target = sum(target_tokens_lens) + + # sampling the number of targets according to per-stream sampling_rate_target + # otherwise take global sampling_rate_target from config + sampling_rate_target = stream_info.get("sampling_rate_target", sampling_rate_target) + + samples = (torch.empty(total_target).uniform_() < sampling_rate_target).split( + target_tokens_lens + ) + target_tokens = [ + (tokens[samples]) for tokens, samples in zip(target_tokens, samples, strict=False) + ] + target_tokens_lens = [len(t) for t in target_tokens] + + if torch.tensor(target_tokens_lens).sum() == 0: + return (torch.tensor([]), torch.tensor([]), torch.tensor([]), torch.tensor([])) + + tt_lin = torch.cat(target_tokens) + tt_lens = target_tokens_lens + + if max_num_targets > 0: + target_tokens = self.sample_tensors_uniform_vectorized( + target_tokens, torch.tensor(tt_lens), max_num_targets + ) + + tt_lin = torch.cat(target_tokens) + target_tokens_lens = [len(t) for t in target_tokens] + tt_lens = target_tokens_lens + + # TODO: can we avoid setting the offsets here manually? 
+ # TODO: ideally we would not have to recover it; but using tokenize_window seems necessary
+ # for consistency -> split tokenize_window in two parts with the cat only happening in the
+ # second
+ offset = 6
+ # offset of 1 : stream_id
+ target_times = torch.split(tt_lin[..., 1:offset], tt_lens)
+ target_coords = torch.split(tt_lin[..., offset : offset + rdata.coords.shape[-1]], tt_lens)
+ offset += rdata.coords.shape[-1]
+ target_geoinfos = torch.split(
+ tt_lin[..., offset : offset + rdata.geoinfos.shape[-1]], tt_lens
+ )
+ offset += rdata.geoinfos.shape[-1]
+ target_tokens = torch.split(tt_lin[..., offset:], tt_lens)
+
+ offset = 6
+ target_coords_raw = torch.split(
+ tt_lin[:, offset : offset + rdata.coords.shape[-1]], tt_lens
+ )
+ # recover absolute times from the relative, encoded ones
+ # TODO: avoid the recovery; see TODO above
+ deltas_sec = (
+ arc_alpha(tt_lin[..., 1] - 0.5, tt_lin[..., 2] - 0.5) / (2.0 * np.pi) * (12 * 3600)
+ )
+ deltas_sec = deltas_sec.numpy().astype("timedelta64[s]")
+ target_times_raw = np.split(time_win[0] + deltas_sec, np.cumsum(tt_lens)[:-1])
+
+ # compute encoding of target coordinates used in prediction network
+ if torch.tensor(tt_lens).sum() > 0:
+ target_coords = get_target_coords_local_ffast(
+ self.hl_target,
+ target_coords,
+ target_geoinfos,
+ target_times,
+ self.hpy_verts_rots_target,
+ self.hpy_verts_local_target,
+ self.hpy_nctrs_target,
+ )
+ target_coords.requires_grad = False
+ target_coords = list(target_coords.split(tt_lens))
+
+ return (target_tokens, target_coords, target_coords_raw, target_times_raw)
+
+ def sample_tensors_uniform_vectorized(
+ self, tensor_list: list, lengths: torch.Tensor, max_total_points: int
+ ):
+ """
+ Randomly select tensors up to a maximum total number of points
+
+ tensor_list: list[torch.Tensor] the list to select from
+ lengths: torch.Tensor the length of each tensor in tensor_list
+ max_total_points: the maximum total number of points to keep
+ """
+ if not tensor_list:
+ return []  # always return a list so that callers can torch.cat the result
+
+ # Create random permutation
+ perm = self.rng.permutation(len(tensor_list))
+
+ # Vectorized cumulative sum
+ cumsum = torch.cumsum(lengths[perm], dim=0)
+
+ # Find cutoff point
+ valid_mask = cumsum <= max_total_points
+ if not valid_mask.any():
+ return []  # always return a list so that callers can torch.cat the result
+
+ num_selected = valid_mask.sum().item()
+ perm = torch.tensor(perm)
+ selected_indices = perm[:num_selected]
+ selected_indices = torch.zeros_like(perm).scatter(0, selected_indices, 1)
+
+ selected_tensors = [
+ t if mask.item() == 1 else t[:0]
+ for t, mask in zip(tensor_list, selected_indices, strict=False)
+ ]
+
+ return selected_tensors
diff --git a/src/weathergen/datasets/tokenizer_utils.py b/src/weathergen/datasets/tokenizer_utils.py
new file mode 100644
index 000000000..c15ece48f
--- /dev/null
+++ b/src/weathergen/datasets/tokenizer_utils.py
@@ -0,0 +1,308 @@
+from collections.abc import Callable
+
+import numpy as np
+import pandas as pd
+import torch
+from astropy_healpix.healpy import ang2pix
+from torch import Tensor
+
+from weathergen.datasets.utils import (
+ r3tos2,
+ s2tor3,
+)
+
+CoordNormalizer = Callable[[torch.Tensor], torch.Tensor]
+
+# on some clusters our numpy version is pinned to 1.x.x, where np.argsort does not support
+# the stable=True argument
+numpy_argsort_args = {"stable": True} if int(np.__version__.split(".")[0]) >= 2 else {}
+
+
+def arc_alpha(sin_alpha, cos_alpha):
+ """Maps a point on the unit circle (np.array or torch.tensor), given by its sine and
+ cosine, to its angular coordinate in [0,2pi)
+ """
+ t = torch.arccos(cos_alpha)
+ mask = sin_alpha < 0.0
+ t[mask] = (2.0 * np.pi) - t[mask]
+ return t
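
A quick standalone check (not part of the patch) of the inversion arc_alpha performs: torch.arccos alone only covers [0, pi]; the sign of the sine disambiguates the lower half of the circle. Assumes the new module is importable once the patch is applied.

import numpy as np
import torch

from weathergen.datasets.tokenizer_utils import arc_alpha

alpha = torch.linspace(0.0, 2.0 * np.pi, 100)[:-1]  # drop the endpoint that wraps to 0
recovered = arc_alpha(torch.sin(alpha), torch.cos(alpha))
assert torch.allclose(recovered, alpha, atol=1e-4)
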
+
+
+def encode_times_source(times, time_win) -> torch.tensor:
+ """Encode times in the format used for source
+
+ Return:
+ len(times) x 5
+ """
+ # assemble the time features as fed to the network
+ fp32 = torch.float32
+ dt = pd.to_datetime(times)
+ dt_win = pd.to_datetime(time_win)
+ dt_delta = dt - dt_win[0]
+ time_tensor = torch.cat(
+ (
+ torch.tensor(dt.year, dtype=fp32).unsqueeze(1),
+ torch.tensor(dt.dayofyear, dtype=fp32).unsqueeze(1),
+ torch.tensor(dt.hour * 60 + dt.minute, dtype=fp32).unsqueeze(1),
+ torch.tensor(dt_delta.seconds, dtype=fp32).unsqueeze(1),
+ torch.tensor(dt_delta.seconds, dtype=fp32).unsqueeze(1),
+ ),
+ 1,
+ )
+
+ # normalize
+ time_tensor[..., 0] /= 2100.0
+ time_tensor[..., 1] = time_tensor[..., 1] / 365.0
+ time_tensor[..., 2] = time_tensor[..., 2] / 1440.0
+ time_tensor[..., 3] = np.sin(time_tensor[..., 3] / (12.0 * 3600.0) * 2.0 * np.pi)
+ time_tensor[..., 4] = np.cos(time_tensor[..., 4] / (12.0 * 3600.0) * 2.0 * np.pi)
+
+ return time_tensor
+
+
+def encode_times_target(times, time_win) -> torch.tensor:
+ """Encode times in the format used for target (relative time in window)
+
+ Return:
+ len(times) x 5
+ """
+ dt = pd.to_datetime(times)
+ dt_win = pd.to_datetime(time_win)
+ # for target only provide local time
+ dt_delta = torch.tensor((dt - dt_win[0]).seconds, dtype=torch.float32).unsqueeze(1)
+ time_tensor = torch.cat(
+ (
+ dt_delta,
+ dt_delta,
+ dt_delta,
+ dt_delta,
+ dt_delta,
+ ),
+ 1,
+ )
+
+ # normalize
+ time_tensor[..., 0] = np.sin(time_tensor[..., 0] / (12.0 * 3600.0) * 2.0 * np.pi)
+ time_tensor[..., 1] = np.cos(time_tensor[..., 1] / (12.0 * 3600.0) * 2.0 * np.pi)
+ time_tensor[..., 2] = np.sin(time_tensor[..., 2] / (12.0 * 3600.0) * 2.0 * np.pi)
+ time_tensor[..., 3] = np.cos(time_tensor[..., 3] / (12.0 * 3600.0) * 2.0 * np.pi)
+ time_tensor[..., 4] = np.sin(time_tensor[..., 4] / (12.0 * 3600.0) * 2.0 * np.pi)
+
+ # Add 0.5 since with ERA5 the first time in a window is otherwise very often exactly 0;
+ # the offset prevents too many zeros in the input, from which nothing can be learned
+ return time_tensor + 0.5
+
+
+def hpy_cell_splits(coords: torch.tensor, hl: int):
+ """Compute healpix cell id for each coordinate on given level hl
+
+ Returns
+ hpy_idxs_ord_split : list of per cell indices into thetas,phis,posr3
+ thetas : thetas in rad
+ phis : phis in rad
+ posr3 : (thetas,phis) as position in R3
+ """
+ thetas = ((90.0 - coords[:, 0]) / 180.0) * np.pi
+ phis = ((coords[:, 1] + 180.0) / 360.0) * 2.0 * np.pi
+ # healpix cells for all points
+ hpy_idxs = ang2pix(2**hl, thetas, phis, nest=True)
+ posr3 = s2tor3(thetas, phis)
+
+ # extract information to split according to cells by first sorting and then finding split idxs
+ hpy_idxs_ord = np.argsort(hpy_idxs, **numpy_argsort_args)
+ splits = np.flatnonzero(np.diff(hpy_idxs[hpy_idxs_ord]))
+
+ # extract per cell data
+ hpy_idxs_ord_temp = np.split(hpy_idxs_ord, splits + 1)
+ hpy_idxs_ord_split = [np.array([], dtype=np.int64) for _ in range(12 * 4**hl)]
+ # TODO: split smarter (with an augmented splits list?) so that this loop is not needed
+ for b, x in zip(np.unique(hpy_idxs[hpy_idxs_ord]), hpy_idxs_ord_temp, strict=True):
+ hpy_idxs_ord_split[b] = x
+
+ return (hpy_idxs_ord_split, thetas, phis, posr3)
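
The sort-then-split pattern in hpy_cell_splits, illustrated standalone (not part of the patch) with made-up cell ids; kind="stable" is the argsort spelling that works on both numpy 1.x and 2.x, which the numpy_argsort_args guard above also accounts for.

import numpy as np

cell_ids = np.array([5, 2, 5, 0, 2, 2])    # hypothetical per-point healpix cell ids
order = np.argsort(cell_ids, kind="stable")
splits = np.flatnonzero(np.diff(cell_ids[order])) + 1
groups = np.split(order, splits)           # point indices grouped per occupied cell
assert [g.tolist() for g in groups] == [[3], [1, 4, 5], [0, 2]]  # cells 0, 2, 5
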
+
+
+def hpy_splits(
+ coords: torch.Tensor, hl: int, token_size: int, pad_tokens: bool
+) -> tuple[list[torch.Tensor], list[torch.Tensor], torch.Tensor]:
+ """Compute healpix cell for each data point and splitting information per cell;
+ when the token_size is exceeded then splitting based on lat is used;
+ tokens can be padded
+
+ Return :
+ idxs_ord : flat list of indices (to data points) per healpix cell
+ idxs_ord_lens : lens of lists per cell
+ (so that data[idxs_ord].split( idxs_ord_lens) provides per cell data)
+ posr3 : R^3 positions of coords
+ """
+
+ # list of data points per healpix cell
+ (hpy_idxs_ord_split, thetas, phis, posr3) = hpy_cell_splits(coords, hl)
+
+ # if token_size is exceeded, split based on latitude
+ # TODO: split by hierarchically traversing healpix scheme
+ thetas_sorted = [torch.argsort(thetas[idxs], stable=True) for idxs in hpy_idxs_ord_split]
+ # remainder for padding to token size
+ if pad_tokens:
+ rem = [
+ token_size - (len(idxs) % token_size if len(idxs) % token_size != 0 else token_size)
+ for idxs in hpy_idxs_ord_split
+ ]
+ else:
+ rem = np.zeros(len(hpy_idxs_ord_split), dtype=np.int32)
+
+ # helper variables to split according to cells
+ # pad to token size *and* offset by +1 to account for the index 0 that is added for the padding
+ idxs_ord = [
+ torch.split(
+ torch.cat((torch.from_numpy(np.take(idxs, ts) + 1), torch.zeros(r, dtype=torch.int32))),
+ token_size,
+ )
+ for idxs, ts, r in zip(hpy_idxs_ord_split, thetas_sorted, rem, strict=True)
+ ]
+
+ # extract length and flatten nested list
+ idxs_ord_lens = [[len(a) for a in aa] for aa in idxs_ord]
+ idxs_ord = [torch.cat([idxs for idxs in iidxs]) for iidxs in idxs_ord]
+
+ return idxs_ord, idxs_ord_lens, posr3
+
+
+def tokenize_window_space(
+ stream_id: float,
+ coords: torch.tensor,
+ geoinfos,
+ source,
+ times,
+ time_win,
+ token_size,
+ hl,
+ hpy_verts_rots,
+ n_coords: CoordNormalizer,
+ enc_time,
+ pad_tokens=True,
+ local_coords=True,
+):
+ """Process one window into tokens"""
+
+ # len(source)==1 would require special case handling that is not worth the effort
+ if len(source) < 2:
+ return
+
+ # the length of idxs_ord_lens[cell] is the number of tokens in that healpix cell
+ idxs_ord, idxs_ord_lens, posr3 = hpy_splits(coords, hl, token_size, pad_tokens)
+
+ # pad with zero at the beginning for token size padding
+ times_enc = enc_time(times, time_win)
+ times_enc_padded = torch.cat([torch.zeros_like(times_enc[0]).unsqueeze(0), times_enc])
+ geoinfos_padded = torch.cat([torch.zeros_like(geoinfos[0]).unsqueeze(0), geoinfos])
+ source_padded = torch.cat([torch.zeros_like(source[0]).unsqueeze(0), source])
+
+ # convert to local coordinates
+ # TODO: avoid that padded lists are rotated, which means potentially a lot of zeros
+ if local_coords:
+ coords_local = _coords_local(posr3, hpy_verts_rots, idxs_ord, n_coords)
+ else:
+ coords_local = torch.cat([torch.zeros_like(coords[0]).unsqueeze(0), coords])
+ coords_local = [coords_local[idxs] for idxs in idxs_ord]
+
+ # reorder based on cells (except for coords_local) and then cat along
+ # (time,coords,geoinfos,source) dimension and then split based on cells
+ tokens_cells = [
+ (
+ list(
+ torch.split(
+ torch.cat(
+ (
+ torch.full([len(idxs), 1], stream_id, dtype=torch.float32),
+ times_enc_padded[idxs],
+ coords_local[i],
+ geoinfos_padded[idxs],
source_padded[idxs], + ), + 1, + ), + idxs_lens, + ) + ) + if idxs_lens[0] > 0 + else [] + ) + for i, (idxs, idxs_lens) in enumerate(zip(idxs_ord, idxs_ord_lens, strict=True)) + ] + + return tokens_cells + + +def tokenize_window_spacetime( + stream_id, + coords, + geoinfos, + source, + times, + time_win, + token_size, + hl, + hpy_verts_rots, + n_coords, + enc_time, + pad_tokens=True, + local_coords=True, +): + """Tokenize respecting an intrinsic time step in the data, i.e. each time step is tokenized + separately + """ + + num_healpix_cells = 12 * 4**hl + tokens_cells = [[] for _ in range(num_healpix_cells)] + + t_unique = np.unique(times) + for _, t in enumerate(t_unique): + mask = t == times + tokens_cells_cur = tokenize_window_space( + stream_id, + coords[mask], + geoinfos[mask], + source[mask], + times[mask], + time_win, + token_size, + hl, + hpy_verts_rots, + n_coords, + enc_time, + pad_tokens, + local_coords, + ) + + tokens_cells = [t + tc for t, tc in zip(tokens_cells, tokens_cells_cur, strict=True)] + + return tokens_cells + + +def _coords_local( + posr3: Tensor, hpy_verts_rots: Tensor, idxs_ord: list[Tensor], n_coords: CoordNormalizer +) -> list[Tensor]: + """Compute simple local coordinates for a set of 3D positions on the unit sphere.""" + fp32 = torch.float32 + posr3 = torch.cat([torch.zeros_like(posr3[0]).unsqueeze(0), posr3]) # prepend zero + + idxs_ords_lens_l = [len(idxs) for idxs in idxs_ord] + # int32 should be enough + idxs_ords_lens = torch.tensor(idxs_ords_lens_l, dtype=torch.int32) + # concat all indices + idxs_ords_c = torch.cat(idxs_ord) + # Copy the rotation matrices for each healpix cell + # num_points x 3 x 3 + rots = torch.repeat_interleave(hpy_verts_rots, idxs_ords_lens, dim=0) + # BMM only works for b x n x m and b x m x 1 + # adding a dummy dimension to posr3 + # numpoints x 3 x 1 + posr3_sel = posr3[idxs_ords_c].unsqueeze(-1) + vec_rot = torch.bmm(rots, posr3_sel) + vec_rot = vec_rot.squeeze(-1) + vec_scaled = n_coords(r3tos2(vec_rot).to(fp32)) + # split back to ragged list + # num_points x 2 + coords_local = torch.split(vec_scaled, idxs_ords_lens_l, dim=0) + return list(coords_local) diff --git a/src/weathergen/datasets/tokenizer_utils_test.py b/src/weathergen/datasets/tokenizer_utils_test.py new file mode 100644 index 000000000..322ca87eb --- /dev/null +++ b/src/weathergen/datasets/tokenizer_utils_test.py @@ -0,0 +1,64 @@ +import torch +from torch import Tensor, tensor + +from weathergen.datasets.tokenizer_utils import CoordNormalizer, _coords_local, r3tos2 + +_pos3r = tensor( + [ + [-1.2492e-02, -1.0921e-09, 9.9992e-01], + [-1.1881e-02, 9.9992e-01, -3.8603e-03], + [-1.0106e-02, -7.3428e-03, 9.9992e-01], + [-7.3428e-03, -1.0106e-02, 9.9992e-01], + [-3.8603e-03, -1.1881e-02, 9.9992e-01], + [1.4897e-10, -1.2492e-02, 9.9992e-01], + [3.8603e-03, -1.1881e-02, 9.9992e-01], + [7.3428e-03, -1.0106e-02, 9.9992e-01], + [1.0106e-02, -7.3428e-03, 9.9992e-01], + [1.1881e-02, -3.8603e-03, 9.9992e-01], + [1.2492e-02, 0.0000e00, 9.9992e-01], + [1.1881e-02, 3.8603e-03, 9.9992e-01], + [1.0106e-02, 7.3428e-03, 9.9992e-01], + [7.3428e-03, 1.0106e-02, 9.9992e-01], + [3.8603e-03, 1.1881e-02, 9.9992e-01], + [-5.4606e-10, 1.2492e-02, 9.9992e-01], + [-3.8603e-03, 1.1881e-02, 9.9992e-01], + [-7.3428e-03, 1.0106e-02, 9.9992e-01], + [-1.0106e-02, 7.3428e-03, 9.9992e-01], + ] +) + +_idxs_ord = [ + tensor([6, 4, 5, 7, 0, 0, 0, 0]), + tensor([1, 2, 3, 8, 0, 0, 0, 0]), + tensor([9, 10, 11, 0, 0, 0, 0, 0]), +] + +_hpy_verts_rots = tensor( + [ + [[0.7070, 0.7070, 0.0208], [-0.7070, 0.7072, 
-0.0086], [-0.0208, -0.0086, 0.9997]], + [[0.6889, 0.7236, 0.0417], [-0.7236, 0.6900, -0.0179], [-0.0417, -0.0179, 0.9990]], + [[0.7236, 0.6889, 0.0417], [-0.6889, 0.7246, -0.0167], [-0.0417, -0.0167, 0.9990]], + ] +) + + +def simple_coords_local( + posr3: Tensor, hpy_verts_rots: Tensor, idxs_ord: list[Tensor], n_coords: CoordNormalizer +) -> list[Tensor]: + fp32 = torch.float32 + posr3 = torch.cat([torch.zeros_like(posr3[0]).unsqueeze(0), posr3]) # prepend zero + """Compute simple local coordinates for a set of 3D positions on the unit sphere.""" + return [ + n_coords(r3tos2(torch.matmul(R, posr3[idxs].transpose(1, 0)).transpose(1, 0)).to(fp32)) + for R, idxs in zip(hpy_verts_rots, idxs_ord, strict=True) + ] + + +def test_coords_local(): + n_coords = lambda x: x + coords_local = simple_coords_local(_pos3r, _hpy_verts_rots, _idxs_ord, n_coords) + coords_local_ref = _coords_local(_pos3r, _hpy_verts_rots, _idxs_ord, n_coords) + torch.testing.assert_close(coords_local, coords_local_ref, atol=1e-6, rtol=0) + + +test_coords_local() diff --git a/src/weathergen/datasets/utils.py b/src/weathergen/datasets/utils.py index 39b6cc90c..b5d2279b8 100644 --- a/src/weathergen/datasets/utils.py +++ b/src/weathergen/datasets/utils.py @@ -1,4 +1,4 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -7,447 +7,749 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. -import time import warnings +import astropy_healpix as hp import numpy as np import torch -import astropy_healpix as hp -from astropy_healpix.healpy import ang2pix, pix2ang +from astropy_healpix.healpy import ang2pix +from weathergen.datasets.stream_data import StreamData -#################################################################################################### -def arc_alpha( sin_alpha, cos_alpha) : - '''Invert cosine/sine for alpha \in [0,2pi] using both functions''' - t = torch.arccos( cos_alpha) - mask = sin_alpha < 0. - t[mask] = (2.*np.pi) - t[mask] - return t #################################################################################################### -def merge_cells( s_list, num_healpix_cells) : +def arc_alpha(sin_alpha, cos_alpha): + """Invert cosine/sine for alpha in [0,2pi] using both functions""" + t = torch.arccos(cos_alpha) + mask = sin_alpha < 0.0 + t[mask] = (2.0 * np.pi) - t[mask] + return t - if torch.tensor([len(s) for s in s_list]).sum() == 0 : - return torch.tensor([]) - - ret = torch.cat([torch.cat([s_list[i_s][i] - for i_s in range(len(s_list)) if len(s_list[i_s])>0 ]) - for i in range(num_healpix_cells)]) - - return ret #################################################################################################### -def vecs_to_rots( vecs) : - ''' - Convert vectors to rotations that align with (1,0,0) ie coordinate origin in geophysical +def vecs_to_rots(vecs): + """ + Convert vectors to rotations that align with (1,0,0) ie coordinate origin in geophysical spherical coordinates. A variant of Rodrigues formula is used - ''' - - Rs = torch.zeros( (vecs.shape[0], 3, 3), dtype=torch.float64 ) - c1 = vecs[:,0] - c2 = vecs[:,1] - c3 = vecs[:,2] - s = torch.square(c2) + torch.square(c3) - Rs[:,0,0] = c1 - Rs[:,0,1] = c2 - Rs[:,0,2] = c3 - Rs[:,1,0] = -c2 - Rs[:,1,1] = (c1 * torch.square(c2) + torch.square(c3)) / s - Rs[:,1,2] = (-1. 
+ c1) * c2 * c3 / s
- Rs[:,2,0] = -c3
- Rs[:,2,1] = (-1. + c1) * c2 * c3 / s
- Rs[:,2,2] = (torch.square(c2) + c1 * torch.square(c3)) / s
-
- return Rs
+ """
+
+ rots = torch.zeros((vecs.shape[0], 3, 3), dtype=torch.float64)
+ c1 = vecs[:, 0]
+ c2 = vecs[:, 1]
+ c3 = vecs[:, 2]
+ s = torch.square(c2) + torch.square(c3)
+ rots[:, 0, 0] = c1
+ rots[:, 0, 1] = c2
+ rots[:, 0, 2] = c3
+ rots[:, 1, 0] = -c2
+ rots[:, 1, 1] = (c1 * torch.square(c2) + torch.square(c3)) / s
+ rots[:, 1, 2] = (-1.0 + c1) * c2 * c3 / s
+ rots[:, 2, 0] = -c3
+ rots[:, 2, 1] = (-1.0 + c1) * c2 * c3 / s
+ rots[:, 2, 2] = (torch.square(c2) + c1 * torch.square(c3)) / s
+
+ return rots
+

####################################################################################################
-def s2tor3( lats, lons) :
- '''
+def s2tor3(lats, lons):
+ """
 Convert from spherical to Cartesian R^3 coordinates
- Note: mathematics convention with lats \in [0,pi] and lons \in [0,2pi] is used
+ Note: mathematics convention with lats in [0,pi] and lons in [0,2pi] is used
 (which is not problematic for lons but for lats care is required)
- '''
- x = torch.sin(lats) * torch.cos(lons)
- y = torch.sin(lats) * torch.sin(lons)
- z = torch.cos(lats)
- out = torch.stack( [x,y,z])
- return out.permute([ *list(np.arange(len(out.shape))[:-1]+1), 0 ])
+ """
+ sin_lats = torch.sin(lats)
+ cos_lats = torch.cos(lats)
+
+ # Calculate the x, y, and z coordinates using vectorized operations.
+ x = sin_lats * torch.cos(lons)
+ y = sin_lats * torch.sin(lons)
+ z = cos_lats
+
+ # Stack the x, y, and z tensors along the last dimension.
+ return torch.stack([x, y, z], dim=-1)
+

####################################################################################################
-def r3tos2( pos) :
- '''
+def r3tos2(pos: torch.Tensor) -> torch.Tensor:
+ """
 Convert from Cartesian R^3 to spherical coordinates
- Note: mathematics convention with lats \in [0,pi] and lons \in [0,2pi] is used
- (which is not problematic for lons but for lats care is required)
- '''
- norm2 = torch.square(pos[...,0]) + torch.square(pos[...,1])
- r = torch.sqrt(norm2 + torch.square( pos[...,2]))
- lats = torch.atan2( pos[...,2], torch.sqrt(norm2))
- lons = torch.atan2( pos[...,1],pos[...,0])
- out = torch.stack( [lats,lons])
- return out.permute([ *list(torch.arange(len(out.shape))[:-1]+1), 0 ])
+ This optimized version is faster and more numerically stable by:
+ 1. Unbinding the input tensor to get x, y, and z components directly.
+ 2. Using torch.hypot for a more efficient and stable calculation of
+ the xy-plane norm.
+ 3. Stacking the final latitude and longitude tensors along the last
+ dimension, which avoids an expensive permute operation.
+
+ Args:
+ pos (torch.Tensor): A tensor of Cartesian coordinates with shape `(..., 3)`.
+
+ Returns:
+ torch.Tensor: A tensor of (lat, lon) coordinates with shape `(..., 2)`.
+ """
+ # Unbind the last dimension to get x, y, and z tensors.
+ x, y, z = torch.unbind(pos, dim=-1)
+
+ # Use torch.hypot for a numerically stable norm in the xy-plane.
+ xy_norm = torch.hypot(x, y)
+
+ # Calculate latitudes and longitudes using atan2.
+ # The output is directly a tensor with the same batch dimensions as the input.
+ lats = torch.atan2(z, xy_norm)
+ lons = torch.atan2(y, x)
+
+ # Stack the results along the final dimension to get a `(..., 2)` tensor.
+ return torch.stack([lats, lons], dim=-1) + #################################################################################################### -def locs_to_cell_coords( hl : int, locs : list, dx = 0.5, dy = 0.5) -> list : - ''' - Map a list of locations per cell to spherical local coordinates centered +def locs_to_cell_coords(hl: int, locs: list, dx=0.5, dy=0.5) -> list: + """ + Map a list of locations per cell to spherical local coordinates centered at the healpix cell center - ''' + """ + + assert locs[13].shape[-1] == 3 if len(locs[13]) > 0 else True - assert locs[13].shape[-1] == 3 if len(locs[13])>0 else True + # centroids of healpix cells + num_healpix_cells = 12 * 4**hl + assert len(locs) == num_healpix_cells - # centroids of healpix cells - num_healpix_cells = 12 * 4**hl - assert len(locs) == num_healpix_cells - - lons, lats = hp.healpix_to_lonlat( np.arange(0,num_healpix_cells), 2**hl, - dx=dx, dy=dy, order='nested') - healpix_centers = s2tor3( torch.from_numpy(np.pi/2. - lats.value), torch.from_numpy(lons.value)) - healpix_centers_Rs = vecs_to_rots( healpix_centers) + lons, lats = hp.healpix_to_lonlat( + np.arange(0, num_healpix_cells), 2**hl, dx=dx, dy=dy, order="nested" + ) + healpix_centers = s2tor3( + torch.from_numpy(np.pi / 2.0 - lats.value), torch.from_numpy(lons.value) + ) + healpix_centers_rots = vecs_to_rots(healpix_centers) - # express each centroid in local coordinates w.r.t to healpix center by rotating center to origin - local_locs = [torch.matmul( R, s.transpose( -1, -2)).transpose( -2, -1) - if len(s)>0 else torch.tensor([]) - for i,(R,s) in enumerate(zip(healpix_centers_Rs,locs))] + ## express each centroid in local coordinates w.r.t to healpix center + # by rotating center to origin + local_locs = [ + torch.matmul(R, s.transpose(-1, -2)).transpose(-2, -1) if len(s) > 0 else torch.tensor([]) + for i, (R, s) in enumerate(zip(healpix_centers_rots, locs, strict=False)) + ] + + return local_locs - return local_locs #################################################################################################### -def locs_to_ctr_coords( ctrs_r3, locs : list) -> list : - ''' - Map a list of locations per cell to spherical local coordinates centered +def locs_to_ctr_coords(ctrs_r3, locs: list[torch.Tensor]) -> list: + """ + Map a list of locations per cell to spherical local coordinates centered at the healpix cell center - ''' + """ + + ctrs_rots = vecs_to_rots(ctrs_r3).to(torch.float32) + + ## express each centroid in local coordinates w.r.t to healpix center + # by rotating center to origin + + # Concatenate all points into single tensor + all_points = torch.cat(locs, dim=0) + + lengths = torch.tensor([len(s) for s in locs], device=all_points.device) + batch_indices = torch.repeat_interleave( + torch.arange(len(locs), device=all_points.device), lengths + ) - ctrs_Rs = vecs_to_rots( ctrs_r3).to(torch.float32) + point_rotations = ctrs_rots[batch_indices] - # express each centroid in local coordinates w.r.t to healpix center by rotating center to origin - local_locs = [torch.matmul( R, s.transpose( -1, -2)).transpose( -2, -1) - if len(s)>0 else torch.zeros([0,3]) - for i,(R,s) in enumerate(zip(ctrs_Rs,locs))] + # Single vectorized batch matrix multiplication + rotated_points = torch.bmm(point_rotations, all_points.unsqueeze(-1)).squeeze(-1) + + # Split back using tensor operations + local_locs = torch.split(rotated_points, lengths.tolist()) + + return list(local_locs) - return local_locs 
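
The concatenate / repeat_interleave / bmm / split pattern used in locs_to_ctr_coords and locs_to_cell_coords_ctrs above, reduced to a standalone sketch (not part of the patch); the ragged point lists and identity rotations are dummy data standing in for the real per-cell matrices.

import torch

locs = [torch.randn(3, 3), torch.randn(0, 3), torch.randn(2, 3)]  # ragged per-cell points
rots = torch.stack([torch.eye(3)] * 3)                            # one rotation per cell

all_points = torch.cat(locs, dim=0)                       # (5, 3)
lengths = torch.tensor([len(s) for s in locs])            # [3, 0, 2]
batch_indices = torch.repeat_interleave(torch.arange(len(locs)), lengths)
rotated = torch.bmm(rots[batch_indices], all_points.unsqueeze(-1)).squeeze(-1)
back = torch.split(rotated, lengths.tolist())             # ragged list again
assert [len(b) for b in back] == [3, 0, 2]
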
#################################################################################################### -def healpix_verts( hl : int, dx = 0.5, dy = 0.5) : - ''' +def healpix_verts(hl: int, dx=0.5, dy=0.5): + """ healpix cell center - ''' + """ + + # centroids of healpix cells + num_healpix_cells = 12 * 4**hl + lons, lats = hp.healpix_to_lonlat( + np.arange(0, num_healpix_cells), 2**hl, dx=dx, dy=dy, order="nested" + ) + verts = s2tor3(torch.from_numpy(np.pi / 2.0 - lats.value), torch.from_numpy(lons.value)) - # centroids of healpix cells - num_healpix_cells = 12 * 4**hl - lons, lats = hp.healpix_to_lonlat( np.arange(0,num_healpix_cells), 2**hl, - dx=dx, dy=dy, order='nested') - verts = s2tor3( torch.from_numpy(np.pi/2. - lats.value), torch.from_numpy(lons.value)) + return verts - return verts #################################################################################################### -def healpix_verts_rots( hl : int, dx = 0.5, dy = 0.5) : - ''' +def healpix_verts_rots(hl: int, dx=0.5, dy=0.5): + """ healpix cell center - ''' + """ - # centroids of healpix cells - num_healpix_cells = 12 * 4**hl - lons, lats = hp.healpix_to_lonlat( np.arange(0,num_healpix_cells), 2**hl, - dx=dx, dy=dy, order='nested') - verts = s2tor3( torch.from_numpy(np.pi/2. - lats.value), torch.from_numpy(lons.value)) - verts_R3 = vecs_to_rots( verts) + # centroids of healpix cells + num_healpix_cells = 12 * 4**hl + lons, lats = hp.healpix_to_lonlat( + np.arange(0, num_healpix_cells), 2**hl, dx=dx, dy=dy, order="nested" + ) + verts = s2tor3(torch.from_numpy(np.pi / 2.0 - lats.value), torch.from_numpy(lons.value)) + verts_rot3 = vecs_to_rots(verts) + + return verts, verts_rot3 - return verts, verts_R3 #################################################################################################### -def locs_to_cell_coords_ctrs( healpix_centers_Rs, locs : list) -> list : - ''' - Map a list of locations per cell to spherical local coordinates centered +def locs_to_cell_coords_ctrs( + healpix_centers_rots: torch.Tensor, locs: list[torch.Tensor] +) -> torch.Tensor: + """ + Map a list of locations per cell to spherical local coordinates centered at the healpix cell center - ''' + """ + + ## express each centroid in local coordinates w.r.t to healpix center + # by rotating center to origin + + # Concatenate all non-empty locations + all_points = torch.cat(locs, dim=0) + lengths = torch.tensor([len(s) for s in locs], device=all_points.device) - # express each centroid in local coordinates w.r.t to healpix center by rotating center to origin - local_locs = [torch.matmul( R, s.transpose( -1, -2)).transpose( -2, -1) - if len(s)>0 else torch.tensor([]) - for i,(R,s) in enumerate(zip(healpix_centers_Rs,locs))] + # Efficiently create batch indices using torch.repeat_interleave + batch_indices = torch.repeat_interleave( + torch.arange(len(locs), device=all_points.device), lengths + ) + + # Select rotation matrices for each point + rotations_selected = healpix_centers_rots[batch_indices] + + # Vectorized matrix multiplication + local_locs = torch.bmm(rotations_selected, all_points.unsqueeze(-1)).squeeze(-1) + + return local_locs - return local_locs #################################################################################################### -def coords_to_hpyidxs( hl, thetas, phis) : +def coords_to_hpyidxs(hl, thetas, phis): + thetas = ((90.0 - thetas) / 180.0) * np.pi + phis = ((180.0 + phis) / 360.0) * 2.0 * np.pi + hpyidxs = ang2pix(2**hl, thetas, phis, nest=True) - thetas = ((90. - thetas ) / 180.) 
* np.pi - phis = (((180. + phis) / 360.) * 2. * np.pi) - hpyidxs = ang2pix( 2**hl, thetas, phis, nest=True) + return hpyidxs - return hpyidxs #################################################################################################### -def add_local_vert_coords( hl, a, verts, tcs, zi, dx, dy, geoinfo_offset) : - ref = torch.tensor( [1., 0., 0.]) - aa = locs_to_cell_coords( hl, verts.unsqueeze(1), dx, dy) - aa = ref - torch.cat( [aaa.repeat( [*tt.shape[:-1],1]) if len(tt)>0 else torch.tensor([]) - for tt,aaa in zip(tcs,aa)] ) - a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = aa - return a +def add_local_vert_coords(hl, a, verts, tcs, zi, dx, dy, geoinfo_offset): + ref = torch.tensor([1.0, 0.0, 0.0]) + aa = locs_to_cell_coords(hl, verts.unsqueeze(1), dx, dy) + aa = ref - torch.cat( + [ + aaa.repeat([*tt.shape[:-1], 1]) if len(tt) > 0 else torch.tensor([]) + for tt, aaa in zip(tcs, aa, strict=False) + ] + ) + a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = aa + return a + #################################################################################################### -def add_local_vert_coords_ctrs2( ctrs, verts, tcs, a, zi, geoinfo_offset) : - ref = torch.tensor( [1., 0., 0.]) - aa = locs_to_cell_coords_ctrs( ctrs, verts.transpose(0,1)) - aa = ref - torch.cat( [aaa.unsqueeze(0).repeat( [*tt.shape[:-1],1,1]) - if len(tt)>0 else torch.tensor([]) - for tt,aaa in zip(tcs,aa)], 0 ) - aa = aa.flatten(1,2) - a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+aa.shape[-1])] = aa - return a +def add_local_vert_coords_ctrs2(verts_local, tcs_lens, a, zi, geoinfo_offset): + ref = torch.tensor([1.0, 0.0, 0.0]) + aa = ref - torch.cat( + [ + aaa.unsqueeze(0).repeat([*tcs_lens, 1, 1]) if len(tt) > 0 else torch.tensor([]) + for tt, aaa in zip(tcs_lens, verts_local, strict=False) + ], + 0, + ) + aa = aa.flatten(1, 2) + a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + aa.shape[-1])] = aa + return a + #################################################################################################### -def add_local_vert_coords_ctrs2( verts_local, tcs_lens, a, zi, geoinfo_offset) : - ref = torch.tensor( [1., 0., 0.]) - aa = locs_to_cell_coords_ctrs( ctrs, verts.transpose(0,1)) - aa = ref - torch.cat( [aaa.unsqueeze(0).repeat( [*tcs_lens,1,1]) - if len(tt)>0 else torch.tensor([]) - for tt,aaa in zip(tcs_lens,verts_local)], 0 ) - aa = aa.flatten(1,2) - a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+aa.shape[-1])] = aa - return a - -# #################################################################################################### # def add_local_vert_coords_ctrs3( ctrs, verts, tcs, a, zi, geoinfo_offset) : - + # ref = torch.tensor( [1., 0., 0.]) -# local_locs = [torch.matmul( R, s.transpose( -1, -2)).transpose( -2, -1) -# for i,(R,s) in enumerate(zip(healpix_centers_Rs,locs)) if len(s)>0] +# local_locs = [ +# torch.matmul(R, s.transpose( -1, -2)).transpose( -2, -1) +# for i,(R,s) in enumerate(zip(healpix_centers_rots,locs)) if len(s)>0 +# ] # aa = locs_to_cell_coords_ctrs( ctrs, verts.transpose(0,1)) -# aa = ref - torch.cat( [aaa.unsqueeze(0).repeat( [*tt.shape[:-1],1,1]) +# aa = ref - torch.cat( [aaa.unsqueeze(0).repeat( [*tt.shape[:-1],1,1]) # if len(tt)>0 else torch.tensor([]) -# for tt,aaa in zip(tcs,aa)] if tt>, 0 ) +# for tt,aaa in zip(tcs,aa)] +# if tt>, 0 ) # aa = aa.flatten(1,2) # a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+aa.shape[-1])] = aa # return a -#################################################################################################### -def 
get_target_coords_local( hlc, target_coords, geoinfo_offset) : - '''Generate local coordinates for target coords w.r.t healpix cell vertices and - and for healpix cell vertices themselves - ''' - - target_coords_lens = [len(t) for t in target_coords] - tcs = [s2tor3( torch.deg2rad( 90. - t[...,geoinfo_offset].to(torch.float64)), - torch.deg2rad( 180. + t[...,geoinfo_offset+1].to(torch.float64))) - if len(t)>0 else torch.tensor([]) for t in target_coords] - target_coords = torch.cat(target_coords) - if target_coords.shape[0] == 0 : - return torch.tensor([]) - - verts00 = healpix_verts( hlc, 0.0, 0.0) - verts10 = healpix_verts( hlc, 1.0, 0.0) - verts11 = healpix_verts( hlc, 1.0, 1.0) - verts01 = healpix_verts( hlc, 0.0, 1.0) - vertsmm = healpix_verts( hlc, 0.5, 0.5) - - a = torch.zeros( [*target_coords.shape[:-1], (target_coords.shape[-1]-2) + 5*(3*5) + 3*8]) - a[...,:geoinfo_offset] = target_coords[...,:geoinfo_offset] - ref = torch.tensor( [1., 0., 0.]) - - zi=0; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords( hlc, tcs, 0.0, 0.0))) - a = add_local_vert_coords( hlc, a, verts10, tcs, 3, 0.0, 0.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts11, tcs, 6, 0.0, 0.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts01, tcs, 9, 0.0, 0.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, vertsmm, tcs, 12, 0.0, 0.0, geoinfo_offset) - - zi=15; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords( hlc, tcs, 1.0, 0.0))) - a = add_local_vert_coords( hlc, a, verts00, tcs, 18, 1.0, 0.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts11, tcs, 21, 1.0, 0.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts01, tcs, 24, 1.0, 0.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, vertsmm, tcs, 27, 1.0, 0.0, geoinfo_offset) - - zi=30; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords( hlc, tcs, 1.0, 1.0))) - a = add_local_vert_coords( hlc, a, verts00, tcs, 33, 1.0, 1.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts10, tcs, 36, 1.0, 1.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts01, tcs, 39, 1.0, 1.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, vertsmm, tcs, 42, 1.0, 1.0, geoinfo_offset) - - zi=45; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords( hlc, tcs, 0.0, 1.0))) - a = add_local_vert_coords( hlc, a, verts00, tcs, 48, 0.0, 1.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts11, tcs, 51, 0.0, 1.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts10, tcs, 54, 0.0, 1.0, geoinfo_offset) - # a = add_local_vert_coords( hlc, a, verts10, tcs, 51, 0.0, 1.0, geoinfo_offset) - # a = add_local_vert_coords( hlc, a, verts01, tcs, 54, 0.0, 1.0, geoinfo_offset) - a = add_local_vert_coords( hlc, a, vertsmm, tcs, 57, 0.0, 1.0, geoinfo_offset) - - zi=60; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords( hlc, tcs, 0.5, 0.5))) - a = add_local_vert_coords( hlc, a, verts00, tcs, 63, 0.5, 0.5, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts10, tcs, 66, 0.5, 0.5, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts11, tcs, 69, 0.5, 0.5, geoinfo_offset) - a = add_local_vert_coords( hlc, a, verts01, tcs, 72, 0.5, 0.5, geoinfo_offset) - - # add centroids to neighboring cells wrt to cell center - num_healpix_cells = 12 * 4**hlc - with warnings.catch_warnings(action="ignore"): - temp = hp.neighbours( 
np.arange(num_healpix_cells), 2**hlc, order='nested').transpose() - # fix missing nbors with references to self - for i, row in enumerate(temp) : - temp[i][row == -1] = i - # coords of centers of all centers - lons, lats = hp.healpix_to_lonlat( np.arange(0,num_healpix_cells), 2**hlc, - dx=0.5, dy=0.5, order='nested') - ctrs = s2tor3( torch.from_numpy(np.pi/2. - lats.value), torch.from_numpy(lons.value)) - ctrs = ctrs[temp.flatten()].reshape( (num_healpix_cells, 8, 3)).transpose(1,0) - # local coords with respect to all neighboring centers - tcs_ctrs = torch.cat( [ref - torch.cat(locs_to_ctr_coords( c, tcs)) for c in ctrs], -1) - zi=75; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+(3*8))] = tcs_ctrs - - # remaining geoinfos (zenith angle etc) - zi=99; a[...,(geoinfo_offset+zi):] = target_coords[...,(geoinfo_offset+2):] - - return a #################################################################################################### -def get_target_coords_local_fast( hlc, target_coords, geoinfo_offset) : - '''Generate local coordinates for target coords w.r.t healpix cell vertices and - and for healpix cell vertices themselves - ''' - - # target_coords_lens = [len(t) for t in target_coords] - tcs = [s2tor3( torch.deg2rad( 90. - t[...,geoinfo_offset].to(torch.float64)), - torch.deg2rad( 180. + t[...,geoinfo_offset+1].to(torch.float64))) - if len(t)>0 else torch.tensor([]) for t in target_coords] - target_coords = torch.cat(target_coords) - if target_coords.shape[0] == 0 : - return torch.tensor([]) - - verts00, verts00_Rs = healpix_verts_rots( hlc, 0.0, 0.0) - verts10, verts10_Rs = healpix_verts_rots( hlc, 1.0, 0.0) - verts11, verts11_Rs = healpix_verts_rots( hlc, 1.0, 1.0) - verts01, verts01_Rs = healpix_verts_rots( hlc, 0.0, 1.0) - vertsmm, vertsmm_Rs = healpix_verts_rots( hlc, 0.5, 0.5) - - a = torch.zeros( [*target_coords.shape[:-1], (target_coords.shape[-1]-2) + 5*(3*5) + 3*8]) - # a = torch.zeros( [*target_coords.shape[:-1], (target_coords.shape[-1]-2) + 5*(3*5) + 3*8]) - # a = torch.zeros( [*target_coords.shape[:-1], 148]) #(target_coords.shape[-1]-2) + 5*(3*5) + 3*8]) - a[...,:geoinfo_offset] = target_coords[...,:geoinfo_offset] - ref = torch.tensor( [1., 0., 0.]) - - zi=0; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords_ctrs( verts00_Rs, tcs))) - verts = torch.stack( [verts10, verts11, verts01, vertsmm]) - a = add_local_vert_coords_ctrs2( verts00_Rs, verts, tcs, a, 3, geoinfo_offset) - - zi=15; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords_ctrs( verts10_Rs, tcs))) - verts = torch.stack( [verts00, verts11, verts01, vertsmm]) - a = add_local_vert_coords_ctrs2( verts10_Rs, verts, tcs, a, 18, geoinfo_offset) - - zi=30; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords_ctrs( verts11_Rs, tcs))) - verts = torch.stack( [verts00, verts10, verts01, vertsmm]) - a = add_local_vert_coords_ctrs2( verts11_Rs, verts, tcs, a, 33, geoinfo_offset) - - zi=45; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords_ctrs( verts01_Rs, tcs))) - verts = torch.stack( [verts00, verts11, verts10, vertsmm]) - a = add_local_vert_coords_ctrs2( verts01_Rs, verts, tcs, a, 48, geoinfo_offset) - - zi=60; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref - - torch.cat(locs_to_cell_coords_ctrs( vertsmm_Rs, tcs))) - verts = torch.stack( [verts00, verts10, verts11, verts01]) - a = add_local_vert_coords_ctrs2( vertsmm_Rs, verts, tcs, a, 63, geoinfo_offset) - - # 
add local coords wrt to center of neighboring cells - # (since the neighbors are used in the prediction) - num_healpix_cells = 12 * 4**hlc - with warnings.catch_warnings(action="ignore"): - temp = hp.neighbours( np.arange(num_healpix_cells), 2**hlc, order='nested').transpose() - # fix missing nbors with references to self - for i, row in enumerate(temp) : - temp[i][row == -1] = i - nctrs = vertsmm[temp.flatten()].reshape( (num_healpix_cells, 8, 3)).transpose(1,0) - # local coords with respect to all neighboring centers - tcs_ctrs = torch.cat( [ref - torch.cat(locs_to_ctr_coords( c, tcs)) for c in nctrs], -1) - zi=75; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+(3*8))] = tcs_ctrs - # a = add_local_vert_coords_ctrs2( vertsmm_Rs, nctrs, tcs, a, 99, geoinfo_offset) - - # remaining geoinfos (zenith angle etc) - # zi=99+3*8; - zi=99 - # assert target_coords.shape[-1] + zi < a.shape[-1] - a[...,(geoinfo_offset+zi):] = target_coords[...,(geoinfo_offset+2):] - - return a +def get_target_coords_local(hlc, target_coords, geoinfo_offset): + """Generate local coordinates for target coords w.r.t healpix cell vertices and + and for healpix cell vertices themselves + """ + + # target_coords_lens = [len(t) for t in target_coords] + tcs = [ + ( + s2tor3( + torch.deg2rad(90.0 - t[..., geoinfo_offset].to(torch.float64)), + torch.deg2rad(180.0 + t[..., geoinfo_offset + 1].to(torch.float64)), + ) + if len(t) > 0 + else torch.tensor([]) + ) + for t in target_coords + ] + target_coords = torch.cat(target_coords) + if target_coords.shape[0] == 0: + return torch.tensor([]) + + verts00 = healpix_verts(hlc, 0.0, 0.0) + verts10 = healpix_verts(hlc, 1.0, 0.0) + verts11 = healpix_verts(hlc, 1.0, 1.0) + verts01 = healpix_verts(hlc, 0.0, 1.0) + vertsmm = healpix_verts(hlc, 0.5, 0.5) + + a = torch.zeros( + [*target_coords.shape[:-1], (target_coords.shape[-1] - 2) + 5 * (3 * 5) + 3 * 8] + ) + a[..., :geoinfo_offset] = target_coords[..., :geoinfo_offset] + ref = torch.tensor([1.0, 0.0, 0.0]) + + zi = 0 + a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat( + locs_to_cell_coords(hlc, tcs, 0.0, 0.0) + ) + a = add_local_vert_coords(hlc, a, verts10, tcs, 3, 0.0, 0.0, geoinfo_offset) + a = add_local_vert_coords(hlc, a, verts11, tcs, 6, 0.0, 0.0, geoinfo_offset) + a = add_local_vert_coords(hlc, a, verts01, tcs, 9, 0.0, 0.0, geoinfo_offset) + a = add_local_vert_coords(hlc, a, vertsmm, tcs, 12, 0.0, 0.0, geoinfo_offset) + + zi = 15 + a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat( + locs_to_cell_coords(hlc, tcs, 1.0, 0.0) + ) + a = add_local_vert_coords(hlc, a, verts00, tcs, 18, 1.0, 0.0, geoinfo_offset) + a = add_local_vert_coords(hlc, a, verts11, tcs, 21, 1.0, 0.0, geoinfo_offset) + a = add_local_vert_coords(hlc, a, verts01, tcs, 24, 1.0, 0.0, geoinfo_offset) + a = add_local_vert_coords(hlc, a, vertsmm, tcs, 27, 1.0, 0.0, geoinfo_offset) + + zi = 30 + a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat( + locs_to_cell_coords(hlc, tcs, 1.0, 1.0) + ) + a = add_local_vert_coords(hlc, a, verts00, tcs, 33, 1.0, 1.0, geoinfo_offset) + a = add_local_vert_coords(hlc, a, verts10, tcs, 36, 1.0, 1.0, geoinfo_offset) + a = add_local_vert_coords(hlc, a, verts01, tcs, 39, 1.0, 1.0, geoinfo_offset) + a = add_local_vert_coords(hlc, a, vertsmm, tcs, 42, 1.0, 1.0, geoinfo_offset) + + zi = 45 + a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat( + locs_to_cell_coords(hlc, tcs, 0.0, 1.0) + ) + a = add_local_vert_coords(hlc, a, verts00, tcs, 48, 0.0, 
1.0, geoinfo_offset)
+    a = add_local_vert_coords(hlc, a, verts11, tcs, 51, 0.0, 1.0, geoinfo_offset)
+    a = add_local_vert_coords(hlc, a, verts10, tcs, 54, 0.0, 1.0, geoinfo_offset)
+    # a = add_local_vert_coords( hlc, a, verts10, tcs, 51, 0.0, 1.0, geoinfo_offset)
+    # a = add_local_vert_coords( hlc, a, verts01, tcs, 54, 0.0, 1.0, geoinfo_offset)
+    a = add_local_vert_coords(hlc, a, vertsmm, tcs, 57, 0.0, 1.0, geoinfo_offset)
+
+    zi = 60
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat(
+        locs_to_cell_coords(hlc, tcs, 0.5, 0.5)
+    )
+    a = add_local_vert_coords(hlc, a, verts00, tcs, 63, 0.5, 0.5, geoinfo_offset)
+    a = add_local_vert_coords(hlc, a, verts10, tcs, 66, 0.5, 0.5, geoinfo_offset)
+    a = add_local_vert_coords(hlc, a, verts11, tcs, 69, 0.5, 0.5, geoinfo_offset)
+    a = add_local_vert_coords(hlc, a, verts01, tcs, 72, 0.5, 0.5, geoinfo_offset)
+
+    # add centroids of neighboring cells w.r.t. the cell center
+    num_healpix_cells = 12 * 4**hlc
+    with warnings.catch_warnings(action="ignore"):
+        temp = hp.neighbours(np.arange(num_healpix_cells), 2**hlc, order="nested").transpose()
+    # fix missing neighbors with references to self
+    for i, row in enumerate(temp):
+        temp[i][row == -1] = i
+    # coords of the centers of all cells
+    lons, lats = hp.healpix_to_lonlat(
+        np.arange(0, num_healpix_cells), 2**hlc, dx=0.5, dy=0.5, order="nested"
+    )
+    ctrs = s2tor3(torch.from_numpy(np.pi / 2.0 - lats.value), torch.from_numpy(lons.value))
+    ctrs = ctrs[temp.flatten()].reshape((num_healpix_cells, 8, 3)).transpose(1, 0)
+    # local coords with respect to all neighboring centers
+    tcs_ctrs = torch.cat([ref - torch.cat(locs_to_ctr_coords(c, tcs)) for c in ctrs], -1)
+    zi = 75
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + (3 * 8))] = tcs_ctrs
+
+    # remaining geoinfos (zenith angle etc)
+    zi = 99
+    a[..., (geoinfo_offset + zi) :] = target_coords[..., (geoinfo_offset + 2) :]
+
+    return a
+
 ####################################################################################################
-def get_target_coords_local_ffast( hlc, target_coords, geoinfo_offset,
-                                   verts_Rs, verts_local, nctrs ) :
-  '''Generate local coordinates for target coords w.r.t healpix cell vertices and
-     and for healpix cell vertices themselves
-  '''
-
-  # target_coords_lens = [len(t) for t in target_coords]
-  tcs = [s2tor3( torch.deg2rad( 90. - t[...,geoinfo_offset]),
-                 torch.deg2rad( 180. + t[...,geoinfo_offset+1]))
-         if len(t)>0 else torch.tensor([]) for t in target_coords]
-  target_coords = torch.cat(target_coords)
-  if target_coords.shape[0] == 0 :
-    return torch.tensor([])
-
-  verts00_Rs, verts10_Rs, verts11_Rs, verts01_Rs, vertsmm_Rs = verts_Rs
-
-  a = torch.zeros( [*target_coords.shape[:-1], (target_coords.shape[-1]-2) + 5*(3*5) + 3*8])
-  a[...,:geoinfo_offset] = target_coords[...,:geoinfo_offset]
-  ref = torch.tensor( [1., 0., 0.])
-
-  tcs_lens = torch.tensor([tt.shape[0] for tt in tcs], dtype=torch.int32)
-  tcs_lens_mask = tcs_lens > 0
-  tcs_lens = tcs_lens[tcs_lens_mask]
-
-  vls = torch.cat( [vl.repeat( [tt,1,1]) for tt,vl in zip(tcs_lens,verts_local[tcs_lens_mask])], 0)
-  vls = vls.transpose( 0,1)
-
-  zi=0; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref -
-                torch.cat(locs_to_cell_coords_ctrs( verts00_Rs, tcs)))
-  zi = 3; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+vls.shape[-1])] = vls[0]
-
-  zi=15; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref -
-                torch.cat(locs_to_cell_coords_ctrs( verts10_Rs, tcs)))
-  zi = 18; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+vls.shape[-1])] = vls[1]
-
-  zi=30; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref -
-                torch.cat(locs_to_cell_coords_ctrs( verts11_Rs, tcs)))
-  zi = 33; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+vls.shape[-1])] = vls[2]
-
-  zi=45; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref -
-                torch.cat(locs_to_cell_coords_ctrs( verts01_Rs, tcs)))
-  zi = 48; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+vls.shape[-1])] = vls[3]
-
-  zi=60; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+3)] = (ref -
-                torch.cat(locs_to_cell_coords_ctrs( vertsmm_Rs, tcs)))
-  zi = 63; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+vls.shape[-1])] = vls[4]
-
-  tcs_ctrs = torch.cat( [ref - torch.cat(locs_to_ctr_coords( c, tcs)) for c in nctrs], -1)
-  zi=75; a[...,(geoinfo_offset+zi):(geoinfo_offset+zi+(3*8))] = tcs_ctrs
-  # a = add_local_vert_coords_ctrs2( vertsmm_Rs, nctrs, tcs, a, 99, geoinfo_offset)
-
-  # remaining geoinfos (zenith angle etc)
-  # zi=99+3*8;
-  zi=99; a[...,(geoinfo_offset+zi):] = target_coords[...,(geoinfo_offset+2):]
-
-  return a
+# TODO: remove this function, it is dead code that will fail immediately
+def get_target_coords_local_fast(hlc, target_coords, geoinfo_offset):
+    """Generate local coordinates for target coords w.r.t. healpix cell vertices
+    and for the healpix cell vertices themselves
+    """
+
+    # target_coords_lens = [len(t) for t in target_coords]
+    tcs = [
+        (
+            s2tor3(
+                torch.deg2rad(90.0 - t[..., geoinfo_offset].to(torch.float64)),
+                torch.deg2rad(180.0 + t[..., geoinfo_offset + 1].to(torch.float64)),
+            )
+            if len(t) > 0
+            else torch.tensor([])
+        )
+        for t in target_coords
+    ]
+    target_coords = torch.cat(target_coords)
+    if target_coords.shape[0] == 0:
+        return torch.tensor([])
+
+    verts00, verts00_rots = healpix_verts_rots(hlc, 0.0, 0.0)
+    verts10, verts10_rots = healpix_verts_rots(hlc, 1.0, 0.0)
+    verts11, verts11_rots = healpix_verts_rots(hlc, 1.0, 1.0)
+    verts01, verts01_rots = healpix_verts_rots(hlc, 0.0, 1.0)
+    vertsmm, vertsmm_rots = healpix_verts_rots(hlc, 0.5, 0.5)
+
+    a = torch.zeros(
+        [*target_coords.shape[:-1], (target_coords.shape[-1] - 2) + 5 * (3 * 5) + 3 * 8]
+    )
+    # a = torch.zeros( [*target_coords.shape[:-1],
+    #                   (target_coords.shape[-1]-2) + 5*(3*5) + 3*8])
+    # a = torch.zeros( [*target_coords.shape[:-1], 148])
+    #                   #(target_coords.shape[-1]-2) + 5*(3*5) + 3*8])
+    a[..., :geoinfo_offset] = target_coords[..., :geoinfo_offset]
+    ref = torch.tensor([1.0, 0.0, 0.0])
+
+    zi = 0
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat(
+        locs_to_cell_coords_ctrs(verts00_rots, tcs)
+    )
+    verts = torch.stack([verts10, verts11, verts01, vertsmm])
+    a = add_local_vert_coords_ctrs2(verts00_rots, verts, tcs, a, 3, geoinfo_offset)
+
+    zi = 15
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat(
+        locs_to_cell_coords_ctrs(verts10_rots, tcs)
+    )
+    verts = torch.stack([verts00, verts11, verts01, vertsmm])
+    a = add_local_vert_coords_ctrs2(verts10_rots, verts, tcs, a, 18, geoinfo_offset)
+
+    zi = 30
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat(
+        locs_to_cell_coords_ctrs(verts11_rots, tcs)
+    )
+    verts = torch.stack([verts00, verts10, verts01, vertsmm])
+    a = add_local_vert_coords_ctrs2(verts11_rots, verts, tcs, a, 33, geoinfo_offset)
+
+    zi = 45
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat(
+        locs_to_cell_coords_ctrs(verts01_rots, tcs)
+    )
+    verts = torch.stack([verts00, verts11, verts10, vertsmm])
+    a = add_local_vert_coords_ctrs2(verts01_rots, verts, tcs, a, 48, geoinfo_offset)
+
+    zi = 60
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - torch.cat(
+        locs_to_cell_coords_ctrs(vertsmm_rots, tcs)
+    )
+    verts = torch.stack([verts00, verts10, verts11, verts01])
+    a = add_local_vert_coords_ctrs2(vertsmm_rots, verts, tcs, a, 63, geoinfo_offset)
+
+    # add local coords w.r.t. the centers of neighboring cells
+    # (since the neighbors are used in the prediction)
+    num_healpix_cells = 12 * 4**hlc
+    with warnings.catch_warnings(action="ignore"):
+        temp = hp.neighbours(np.arange(num_healpix_cells), 2**hlc, order="nested").transpose()
+    # fix missing neighbors with references to self
+    for i, row in enumerate(temp):
+        temp[i][row == -1] = i
+    nctrs = vertsmm[temp.flatten()].reshape((num_healpix_cells, 8, 3)).transpose(1, 0)
+    # local coords with respect to all neighboring centers
+    tcs_ctrs = torch.cat([ref - torch.cat(locs_to_ctr_coords(c, tcs)) for c in nctrs], -1)
+    zi = 75
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + (3 * 8))] = tcs_ctrs
+    # a = add_local_vert_coords_ctrs2( vertsmm_rots, nctrs, tcs, a, 99, geoinfo_offset)
+
+    # remaining geoinfos (zenith angle etc)
+    # zi=99+3*8;
+    zi = 99
+    # assert target_coords.shape[-1] + zi < a.shape[-1]
+    a[..., (geoinfo_offset + zi) :] = target_coords[..., (geoinfo_offset + 2) :]
+
+    return a
+
 ####################################################################################################
-if __name__ == '__main__' :
+def tcs_optimized(target_coords: list[torch.Tensor]) -> tuple[list[torch.Tensor], torch.Tensor]:
+    """
+    Args:
+        target_coords: List of 2D coordinate tensors, each with shape [N, 2]
+
+    Returns:
+        tcs: List of transformed coordinates
+        concatenated_coords: All original coords concatenated
+    """
-  vecs = torch.nn.functional.normalize( torch.rand( (10,3), dtype=torch.float64))
-  Rs = vecs_to_rots( vecs)
-  res = torch.stack( [torch.matmul( R, vec) for R,vec in zip(Rs,vecs)])
-  ref = torch.tensor( [1.,0.,0.], dtype=torch.float64)
-  passed = torch.allclose( res, ref)
-  print( f'passed = {passed}')
+    # Concatenate all tensors
+    stacked_coords = torch.cat(target_coords, dim=0)  # [total_points, 2]
+    # Single vectorized coordinate transformation
+    theta_all = torch.deg2rad(90.0 - stacked_coords[..., 0])
+    phi_all = torch.deg2rad(180.0 + stacked_coords[..., 1])
+
+    # Transform all coordinates
+    transformed_all = s2tor3(theta_all, phi_all)  # [total_points, 3]
+
+    # Split back to original structure
+    sizes = [t.shape[0] for t in target_coords]  # Get original tensor sizes
+    tcs = list(torch.split(transformed_all, sizes, dim=0))  # Split back to list
+    return tcs, stacked_coords
+
+
+####################################################################################################
+def get_target_coords_local_ffast(
+    hlc, target_coords, target_geoinfos, target_times, verts_rots, verts_local, nctrs
+):
+    """Generate local coordinates for target coords w.r.t. healpix cell vertices
+    and for the healpix cell vertices themselves
+    """
+
+    # target_coords_lens = [len(t) for t in target_coords]
+    tcs, target_coords = tcs_optimized(target_coords)
+
+    if target_coords.shape[0] == 0:
+        return torch.tensor([])
+    target_geoinfos = torch.cat(target_geoinfos)
+    target_times = torch.cat(target_times)
+
+    verts00_rots, verts10_rots, verts11_rots, verts01_rots, vertsmm_rots = verts_rots
+
+    a = torch.zeros(
+        [
+            *target_coords.shape[:-1],
+            1 + target_geoinfos.shape[1] + target_times.shape[1] + 5 * (3 * 5) + 3 * 8,
+        ]
+    )
+    # TODO: properly set stream_id, implicitly zero at the moment
+    geoinfo_offset = 1
+    a[..., geoinfo_offset : geoinfo_offset + target_times.shape[1]] = target_times
+    geoinfo_offset += target_times.shape[1]
+    a[..., geoinfo_offset : geoinfo_offset + target_geoinfos.shape[1]] = target_geoinfos
+    geoinfo_offset += target_geoinfos.shape[1]
+
+    ref = torch.tensor([1.0, 0.0, 0.0])
+
+    tcs_lens = torch.tensor([tt.shape[0] for tt in tcs], dtype=torch.int32)
+    tcs_lens_mask = tcs_lens > 0
+    tcs_lens = tcs_lens[tcs_lens_mask]
+
+    vls = torch.cat(
+        [
+            vl.repeat([tt, 1, 1])
+            for tt, vl in zip(tcs_lens, verts_local[tcs_lens_mask], strict=False)
+        ],
+        0,
+    )
+    vls = vls.transpose(0, 1)
+
+    zi = 0
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - locs_to_cell_coords_ctrs(
+        verts00_rots, tcs
+    )
+
+    zi = 3
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + vls.shape[-1])] = vls[0]
+
+    zi = 15
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - locs_to_cell_coords_ctrs(
+        verts10_rots, tcs
+    )
+
+    zi = 18
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + vls.shape[-1])] = vls[1]
+
+    zi = 30
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - locs_to_cell_coords_ctrs(
+        verts11_rots, tcs
+    )
+
+    zi = 33
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + vls.shape[-1])] = vls[2]
+
+    zi = 45
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - locs_to_cell_coords_ctrs(
+        verts01_rots, tcs
+    )
+
+    zi = 48
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + vls.shape[-1])] = vls[3]
+
+    zi = 60
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + 3)] = ref - locs_to_cell_coords_ctrs(
+        vertsmm_rots, tcs
+    )
+
+    zi = 63
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + vls.shape[-1])] = vls[4]
+
+    tcs_ctrs = torch.cat([ref - torch.cat(locs_to_ctr_coords(c, tcs)) for c in nctrs], -1)
+    zi = 75
+    a[..., (geoinfo_offset + zi) : (geoinfo_offset + zi + (3 * 8))] = tcs_ctrs
+    # a = add_local_vert_coords_ctrs2( vertsmm_rots, nctrs, tcs, a, 99, geoinfo_offset)
+
+    # remaining geoinfos (zenith angle etc)
+    # zi=99+3*8;
+    zi = 99
+    a[..., (geoinfo_offset + zi) :] = target_coords[..., (geoinfo_offset + 2) :]
+
+    return a
+
+
+def compute_offsets_scatter_embed(batch: StreamData) -> StreamData:
+    """
+    Compute auxiliary information for scatter operation that changes from stream-centric to
+    cell-centric computations
+
+    Parameters
+    ----------
+    batch : StreamData
+        batch of stream data information for which offsets have to be computed
+
+    Returns
+    -------
+    StreamData
+        stream data with offsets added as members
+    """
+
+    # collect source_tokens_lens for all stream datas
+    source_tokens_lens = torch.stack(
+        [
+            torch.stack(
+                [
+                    s.source_tokens_lens if len(s.source_tokens_lens) > 0 else torch.tensor([])
+                    for s in stl_b
+                ]
+            )
+            for stl_b in batch
+        ]
+    )
+
+    # precompute index sets for scatter operation after embed
+    offsets_base = source_tokens_lens.sum(1).sum(0).cumsum(0)
+    offsets = torch.cat([torch.zeros(1, dtype=torch.int32), offsets_base[:-1]])
+    offsets_pe = torch.zeros_like(offsets)
+
+    for ib, sb in enumerate(batch):
+        for itype, s in enumerate(sb):
+            if not s.source_empty():
+                s.source_idxs_embed = torch.cat(
+                    [
+                        torch.arange(offset, offset + token_len, dtype=torch.int64)
+                        for offset, token_len in zip(
+                            offsets, source_tokens_lens[ib, itype], strict=False
+                        )
+                    ]
+                )
+                s.source_idxs_embed_pe = torch.cat(
+                    [
+                        torch.arange(offset, offset + token_len, dtype=torch.int32)
+                        for offset, token_len in zip(
+                            offsets_pe, source_tokens_lens[ib][itype], strict=False
+                        )
+                    ]
+                )
+
+            # advance offsets
+            offsets += source_tokens_lens[ib][itype]
+            offsets_pe += source_tokens_lens[ib][itype]
+
+    return batch
+
+
+def compute_idxs_predict(forecast_dt: int, batch: StreamData) -> list:
+    """
+    Compute auxiliary information for prediction
+
+    Parameters
+    ----------
+    forecast_dt : int
+        number of forecast steps
+    batch :
+        StreamData information for current batch
+
+    Returns
+    -------
+    list
+        lens for each item for varlen flash attention
+    """
+
+    target_coords_lens = [[s.target_coords_lens for s in sb] for sb in batch]
+
+    # target coords idxs
+    tcs_lens_merged = []
+    pad = torch.zeros(1, dtype=torch.int32)
+    for ii in range(len(batch[0])):
+        # generate length lists for varlen attention (per-batch lists for local,
+        # per-cell attention and global attention)
+        tcs_lens_merged += [
+            [
+                torch.cat(
+                    [
+                        pad,
+                        torch.cat(
+                            [
+                                target_coords_lens[i_b][ii][fstep]
+                                for i_b in range(len(target_coords_lens))
+                            ]
+                        ),
+                    ]
+                ).to(torch.int32)
+                for fstep in range(forecast_dt + 1)
+            ]
+        ]
+
+    return tcs_lens_merged
+
+
+def compute_source_cell_lens(batch: StreamData) -> torch.Tensor:
+    """
+    Compute auxiliary information for varlen attention for local assimilation
+
+    Parameters
+    ----------
+    batch :
+        StreamData information for current batch
+
+    Returns
+    -------
+    torch.Tensor
+        Offsets for varlen attention
+    """
+
+    # precompute for processing in the model (with varlen flash attention)
+    source_cell_lens_raw = torch.stack(
+        [
+            torch.stack(
+                [
+                    s.source_tokens_lens if len(s.source_tokens_lens) > 0 else torch.tensor([])
+                    for s in stl_b
+                ]
+            )
+            for stl_b in batch
+        ]
+    )
+    source_cell_lens = torch.sum(source_cell_lens_raw, 1).flatten().to(torch.int32)
+    source_cell_lens = torch.cat([torch.zeros(1, dtype=torch.int32), source_cell_lens])
+
+    return source_cell_lens
diff --git a/src/weathergen/datasets/utils_test.py b/src/weathergen/datasets/utils_test.py
new file mode 100644
index 000000000..56f59cd07
--- /dev/null
+++ b/src/weathergen/datasets/utils_test.py
@@ -0,0 +1,153 @@
+import torch
+from torch import Tensor, tensor
+
+from weathergen.datasets.utils import (
+    locs_to_cell_coords_ctrs,
+    locs_to_ctr_coords,
+    s2tor3,
+    tcs_optimized,
+    vecs_to_rots,
+)
+
+
+def _locs_to_cell_coords_ctrs(
+    healpix_centers_rots: torch.Tensor, locs: list[torch.Tensor]
+) -> torch.Tensor:
+    return torch.cat(
+        [
+            torch.matmul(R, s.transpose(-1, -2)).transpose(-2, -1)
+            if len(s) > 0
+            else torch.tensor([])
+            for _, (R, s) in enumerate(zip(healpix_centers_rots, locs, strict=False))
+        ]
+    )
+
+
+def test_locs_to_cell_coords_ctrs():
+    locs = [
+        tensor(
+            [
+                [0.7235, -0.6899, -0.0245],
+                [0.7178, -0.6951, -0.0408],
+                [0.7288, -0.6835, -0.0408],
+                [0.7229, -0.6886, -0.0571],
+            ]
+        ),
+        tensor(
+            [
+                [0.6899, -0.7235, -0.0245],
+                [0.6835, -0.7288, -0.0408],
+                [0.6951, -0.7178, -0.0408],
+                [0.6886, -0.7229, -0.0571],
+            ]
+        ),
+        tensor([]),
+    ]
+    hp_centers_rots = tensor(
+        [
+            [
+                [7.0711e-01, 7.0711e-01, 6.1232e-17],
+                [-7.0711e-01, 7.0711e-01, -2.5363e-17],
+                [-6.1232e-17, -2.5363e-17, 1.0000e00],
+            ],
+            [
+                [6.8939e-01, 7.2409e-01, 2.0833e-02],
+                [-7.2409e-01, 6.8965e-01, -8.9294e-03],
+                [-2.0833e-02, -8.9294e-03, 9.9974e-01],
+            ],
+            [
+                [7.2409e-01, 6.8939e-01, 2.0833e-02],
+                [-6.8939e-01, 7.2434e-01, -8.3304e-03],
+                [-2.0833e-02, -8.3304e-03, 9.9975e-01],
+            ],
+            [
+                [7.0649e-01, 7.0649e-01, 4.1667e-02],
+                [-7.0649e-01, 7.0751e-01, -1.7250e-02],
+                [-4.1667e-02, -1.7250e-02, 9.9898e-01],
+            ],
+        ]
+    )
+    torch.testing.assert_close(
+        _locs_to_cell_coords_ctrs(hp_centers_rots, locs),
+        locs_to_cell_coords_ctrs(hp_centers_rots, locs),
+    )
+
+
+def _tcs_simpled(target_coords: list[Tensor]) -> tuple[list[Tensor], Tensor]:
+    tcs = [
+        (
+            s2tor3(
+                torch.deg2rad(90.0 - t[..., 0]),
+                torch.deg2rad(180.0 + t[..., 1]),
+            )
+            if len(t) > 0
+            else torch.tensor([])
+        )
+        for t in target_coords
+    ]
+    cat_target_coords = torch.cat(target_coords)
+    return tcs, cat_target_coords
+
+
+def test_tcs():
+    target_coords = [
+        tensor(
+            [[2.3377, -135.0000], [1.4026, -135.4545], [1.4026, -134.5455], [0.4675, -135.0000]]
+        ),
+        tensor(
+            [[3.2727, -133.6082], [2.3377, -134.0816], [2.3377, -133.1633], [1.4026, -133.6364]]
+        ),
+    ]
+    tcs_ref, cat_tcs_ref = _tcs_simpled(target_coords)
+    tcs_opt, cat_tcs_opt = tcs_optimized(target_coords)
+    assert len(tcs_ref) == len(tcs_opt)
+    torch.testing.assert_close(cat_tcs_ref, cat_tcs_opt)
+    torch.testing.assert_close(tcs_ref, tcs_opt, atol=1e-8, rtol=1e-5)
+
+
+def _locs_to_ctr_coords(ctrs_r3, locs: list[torch.Tensor]) -> list[torch.Tensor]:
+    ctrs_rots = vecs_to_rots(ctrs_r3).to(torch.float32)
+
+    # express each centroid in local coordinates w.r.t. the healpix center
+    # by rotating the center to the origin
+    return [
+        (
+            torch.matmul(R, s.transpose(-1, -2)).transpose(-2, -1)
+            if len(s) > 0
+            else torch.zeros([0, 3])
+        )
+        for i, (R, s) in enumerate(zip(ctrs_rots, locs, strict=False))
+    ]
+
+
+def test_locs_to_ctr_coords():
+    locs = [
+        tensor(
+            [
+                [0.7235, -0.6899, -0.0245],
+                [0.7178, -0.6951, -0.0408],
+                [0.7288, -0.6835, -0.0408],
+                [0.7229, -0.6886, -0.0571],
+            ]
+        ),
+        tensor(
+            [
+                [0.6899, -0.7235, -0.0245],
+                [0.6835, -0.7288, -0.0408],
+                [0.6951, -0.7178, -0.0408],
+                [0.6886, -0.7229, -0.0571],
+            ]
+        ),
+        tensor([]),
+    ]
+    ctrs_r3 = tensor(
+        [
+            [7.2425e-01, 6.8954e-01, 6.1232e-17],
+            [7.0695e-01, 7.0695e-01, 2.0833e-02],
+            [7.4079e-01, 6.7141e-01, 2.0833e-02],
+        ]
+    )
+    torch.testing.assert_close(
+        locs_to_ctr_coords(ctrs_r3, locs),
+        _locs_to_ctr_coords(ctrs_r3, locs),
+    )
diff --git a/src/weathergen/model/attention.py b/src/weathergen/model/attention.py
index 30e2e5fe3..39ed1c041 100644
--- a/src/weathergen/model/attention.py
+++ b/src/weathergen/model/attention.py
@@ -1,4 +1,4 @@
-# (C) Copyright 2024 WeatherGenerator contributors.
+# (C) Copyright 2025 WeatherGenerator contributors.
 #
 # This software is licensed under the terms of the Apache Licence Version 2.0
 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
@@ -7,505 +7,624 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. -import torch -import time -import code from functools import partial -import numpy as np - -from flash_attn import flash_attn_varlen_func, flash_attn_func - -from weathergen.model.norms import RMSNorm -from weathergen.model.norms import AdaLayerNorm - -from torch.nn.attention.flex_attention import flex_attention, create_mask, create_block_mask - -#################################################################################################### -class MultiSelfAttentionHead_Varlen(torch.nn.Module): - - ######################################### - def __init__(self, dim_embed, num_heads, dim_head_proj=None, dropout_rate=0., - with_qk_lnorm=True, with_flash=True, norm_type = 'LayerNorm', - softcap=0.0, dim_aux=None) : - - super(MultiSelfAttentionHead_Varlen, self).__init__() - - self.num_heads = num_heads - self.dropout_rate = dropout_rate - self.with_flash = with_flash - self.softcap = softcap - - assert 0 == dim_embed % num_heads - self.dim_head_proj = dim_embed // num_heads if dim_head_proj is None else dim_head_proj - - if norm_type == 'LayerNorm' : - norm = partial( torch.nn.LayerNorm, elementwise_affine=False) - else : - norm = RMSNorm - - if dim_aux is not None : - self.lnorm = AdaLayerNorm( dim_embed, dim_aux) - else : - self.lnorm = norm( dim_embed) - self.proj_heads_q = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False) - self.proj_heads_k = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False) - self.proj_heads_v = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False) - self.proj_out = torch.nn.Linear( dim_embed, dim_embed, bias = False) - self.dropout = torch.nn.Dropout( p=dropout_rate) if dropout_rate > 0. 
else torch.nn.Identity() - - lnorm = norm if with_qk_lnorm else torch.nn.Identity - self.lnorm_q = lnorm( self.dim_head_proj) - self.lnorm_k = lnorm( self.dim_head_proj) - - assert with_flash, 'Only flash attention supported at the moment' - - ######################################### - def forward( self, x, x_lens, ada_ln_aux=None) : - - x_in = x - x = self.lnorm( x) if ada_ln_aux is None else self.lnorm( x, ada_ln_aux) - - # project onto heads and q,k,v and ensure these are 4D tensors as required for flash attention - s = [ x.shape[0], self.num_heads, -1] - qs = self.lnorm_q( self.proj_heads_q( x).reshape(s)).to(torch.float16) - ks = self.lnorm_k( self.proj_heads_k( x).reshape(s)).to(torch.float16) - vs = self.proj_heads_v( x).reshape(s) - - cum_x_lens = torch.cumsum( x_lens, 0, dtype=torch.int32) - # ordering of tensors (seq, heads, embed) (which differs from torch's flash attention implt) - outs = flash_attn_varlen_func( qs, ks, vs, cum_x_lens, cum_x_lens, x_lens.max(), x_lens.max(), - softcap=self.softcap, dropout_p=self.dropout_rate) - - # return x_in + self.dropout( self.proj_out( outs.flatten( -2, -1)) ) - return x_in + self.proj_out( outs.flatten( -2, -1)) - -#################################################################################################### -class MultiSelfAttentionHead_Varlen_Flex(torch.nn.Module): - - ######################################### - def __init__(self, dim_embed, num_heads, dim_head_proj=None, dropout_rate=0., - with_qk_lnorm=True, with_flash=True, norm_type = 'LayerNorm', - softcap=0.0) : - - super(MultiSelfAttentionHead_Varlen_Flex, self).__init__() - - self.num_heads = num_heads - self.with_flash = with_flash - self.softcap = softcap - - assert 0 == dim_embed % num_heads - self.dim_head_proj = dim_embed // num_heads if dim_head_proj is None else dim_head_proj - - if norm_type == 'LayerNorm' : - norm = partial( torch.nn.LayerNorm, elementwise_affine=False) - else : - norm = RMSNorm - - self.lnorm = norm( dim_embed) - self.proj_heads_q = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False) - self.proj_heads_k = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False) - self.proj_heads_v = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False) - self.proj_out = torch.nn.Linear( dim_embed, dim_embed, bias = False) - self.dropout = torch.nn.Dropout( p=dropout_rate) if dropout_rate > 0. 
else torch.nn.Identity()
+import torch
+from flash_attn import flash_attn_func, flash_attn_varlen_func
+from torch.nn.attention.flex_attention import create_block_mask, flex_attention
+
+from weathergen.model.norms import AdaLayerNorm, RMSNorm
+
+
+class MultiSelfAttentionHeadVarlen(torch.nn.Module):
+    def __init__(
+        self,
+        dim_embed,
+        num_heads,
+        dim_head_proj=None,
+        dropout_rate=0.0,
+        with_residual=True,
+        with_qk_lnorm=True,
+        with_flash=True,
+        norm_type="LayerNorm",
+        softcap=0.0,
+        dim_aux=None,
+        norm_eps=1e-5,
+        attention_dtype=torch.bfloat16,
+    ):
+        super(MultiSelfAttentionHeadVarlen, self).__init__()
+
+        self.num_heads = num_heads
+        self.dropout_rate = dropout_rate
+        self.with_flash = with_flash
+        self.softcap = softcap
+        self.with_residual = with_residual
+
+        assert dim_embed % num_heads == 0
+        self.dim_head_proj = dim_embed // num_heads if dim_head_proj is None else dim_head_proj
+
+        if norm_type == "LayerNorm":
+            norm = partial(torch.nn.LayerNorm, elementwise_affine=False, eps=norm_eps)
+        else:
+            norm = RMSNorm
+
+        if dim_aux is not None:
+            self.lnorm = AdaLayerNorm(dim_embed, dim_aux, norm_eps=norm_eps)
+        else:
+            self.lnorm = norm(dim_embed, eps=norm_eps)
+        self.proj_heads_q = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False)
+        self.proj_heads_k = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False)
+        self.proj_heads_v = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False)
+        self.proj_out = torch.nn.Linear(dim_embed, dim_embed, bias=False)
+        self.dropout = (
+            torch.nn.Dropout(p=dropout_rate) if dropout_rate > 0.0 else torch.nn.Identity()
+        )
+
+        lnorm = norm if with_qk_lnorm else torch.nn.Identity
+        self.lnorm_q = lnorm(self.dim_head_proj, eps=norm_eps)
+        self.lnorm_k = lnorm(self.dim_head_proj, eps=norm_eps)
+
+        self.dtype = attention_dtype
+
+        assert with_flash, "Only flash attention supported at the moment"
+
+    def forward(self, x, x_lens, ada_ln_aux=None):
+        if self.with_residual:
+            x_in = x
+        x = self.lnorm(x) if ada_ln_aux is None else self.lnorm(x, ada_ln_aux)
+
+        # project onto heads and q,k,v and
+        # ensure these are 4D tensors as required for flash attention
+        s = [x.shape[0], self.num_heads, -1]
+        qs = self.lnorm_q(self.proj_heads_q(x).reshape(s)).to(self.dtype)
+        ks = self.lnorm_k(self.proj_heads_k(x).reshape(s)).to(self.dtype)
+        vs = self.proj_heads_v(x).reshape(s)
+
+        # set dropout rate according to training/eval mode as required by flash_attn
+        dropout_rate = self.dropout_rate if self.training else 0.0
+
+        cum_x_lens = torch.cumsum(x_lens, 0, dtype=torch.int32)
+        # ordering of tensors is (seq, heads, embed), which differs from torch's
+        # flash attention implementation
+        outs = flash_attn_varlen_func(
+            qs,
+            ks,
+            vs,
+            cum_x_lens,
+            cum_x_lens,
+            x_lens.max(),
+            x_lens.max(),
+            softcap=self.softcap,
+            dropout_p=dropout_rate,
+        )
+
+        out = self.proj_out(outs.flatten(-2, -1))
+
+        if self.with_residual:
+            out = out + x_in
+
+        return out
+
+
+class MultiSelfAttentionHeadVarlenFlex(torch.nn.Module):
+    def __init__(
+        self,
+        dim_embed,
+        num_heads,
+        dim_head_proj=None,
+        dropout_rate=0.0,
+        with_residual=True,
+        with_qk_lnorm=True,
+        with_flash=True,
+        norm_type="LayerNorm",
+        softcap=0.0,
+        norm_eps=1e-5,
+        attention_dtype=torch.bfloat16,
+    ):
+        super(MultiSelfAttentionHeadVarlenFlex, self).__init__()
+
+        self.num_heads = num_heads
+        self.with_flash = with_flash
+        self.softcap = softcap
+        self.with_residual = with_residual
+
+        assert dim_embed % num_heads == 0
+        self.dim_head_proj = dim_embed // num_heads if
dim_head_proj is None else dim_head_proj + + if norm_type == "LayerNorm": + norm = partial(torch.nn.LayerNorm, elementwise_affine=False, eps=norm_eps) + else: + norm = RMSNorm + + self.lnorm = norm(dim_embed, eps=norm_eps) + self.proj_heads_q = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False) + self.proj_heads_k = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False) + self.proj_heads_v = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False) + self.proj_out = torch.nn.Linear(dim_embed, dim_embed, bias=False) + self.dropout = ( + torch.nn.Dropout(p=dropout_rate) if dropout_rate > 0.0 else torch.nn.Identity() + ) + + lnorm = norm if with_qk_lnorm else torch.nn.Identity + self.lnorm_q = lnorm(self.dim_head_proj, eps=norm_eps) + self.lnorm_k = lnorm(self.dim_head_proj, eps=norm_eps) + self.dtype = attention_dtype + + assert with_flash, "Only flash attention supported at the moment" + + def att(qs, ks, vs, x_mask): + def sparsity_mask(score, b, h, q_idx, kv_idx): + return (q_idx // 16) == (kv_idx % 16) + + return flex_attention(qs, ks, vs, score_mod=sparsity_mask) + + self.compiled_flex_attention = torch.compile(att, dynamic=False) + + def forward(self, x, x_lens=None): + if self.with_residual: + x_in = x + x = self.lnorm(x) + + # project onto heads and q,k,v and + # ensure these are 4D tensors as required for flash attention + s = [x.shape[0], 1, self.num_heads, -1] + qs = self.lnorm_q(self.proj_heads_q(x).reshape(s)).to(self.dtype).permute([1, 2, 0, 3]) + ks = self.lnorm_k(self.proj_heads_k(x).reshape(s)).to(self.dtype).permute([1, 2, 0, 3]) + vs = self.proj_heads_v(x).reshape(s).permute([1, 2, 0, 3]) + + outs = self.compiled_flex_attention(qs, ks, vs).transpose(1, 2).squeeze() + + out = self.dropout(self.proj_out(outs.flatten(-2, -1))) + if self.with_residual: + out = out + x_in + + return out + + +class MultiSelfAttentionHeadLocal(torch.nn.Module): + def __init__( + self, + dim_embed, + num_heads, + qkv_len, + block_factor, + dim_head_proj=None, + dropout_rate=0.0, + with_residual=True, + with_qk_lnorm=True, + with_flash=True, + norm_type="LayerNorm", + softcap=0.0, + dim_aux=None, + norm_eps=1e-5, + attention_dtype=torch.bfloat16, + ): + super(MultiSelfAttentionHeadLocal, self).__init__() + + self.num_heads = num_heads + self.with_flash = with_flash + self.softcap = softcap + self.with_residual = with_residual + + assert dim_embed % num_heads == 0 + self.dim_head_proj = dim_embed // num_heads if dim_head_proj is None else dim_head_proj + + if norm_type == "LayerNorm": + norm = partial(torch.nn.LayerNorm, elementwise_affine=False, eps=norm_eps) + else: + norm = RMSNorm + + if dim_aux is not None: + self.lnorm = AdaLayerNorm(dim_embed, dim_aux, norm_eps=norm_eps) + else: + self.lnorm = norm(dim_embed, eps=norm_eps) + self.proj_heads_q = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False) + self.proj_heads_k = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False) + self.proj_heads_v = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False) + self.proj_out = torch.nn.Linear(dim_embed, dim_embed, bias=False) + self.dropout = ( + torch.nn.Dropout(p=dropout_rate) if dropout_rate > 0.0 else torch.nn.Identity() + ) + + lnorm = norm if with_qk_lnorm else torch.nn.Identity + self.lnorm_q = lnorm(self.dim_head_proj, eps=norm_eps) + self.lnorm_k = lnorm(self.dim_head_proj, eps=norm_eps) + + self.dtype = attention_dtype + assert with_flash, "Only flash attention supported." 
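+
+        # For example, with block_factor = 4 the mask defined next partitions the
+        # qkv_len positions into consecutive blocks {0,1,2,3}, {4,5,6,7}, ...: a query
+        # at position i attends only to keys at positions j with
+        # i // block_factor == j // block_factor, i.e. attention is local to each block.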
+ + # define block mask + def mask_block_local(batch, head, idx_q, idx_kv): + return (idx_q // block_factor) == (idx_kv // block_factor) + + self.block_mask = create_block_mask( + mask_block_local, B=None, H=None, Q_LEN=qkv_len, KV_LEN=qkv_len + ) + # compile for efficiency + self.flex_attention = torch.compile(flex_attention, dynamic=False) + + def forward(self, x, ada_ln_aux=None): + if self.with_residual: + x_in = x + x = self.lnorm(x) if ada_ln_aux is None else self.lnorm(x, ada_ln_aux) + + # project onto heads + s = [x.shape[0], x.shape[1], self.num_heads, -1] + qs = self.lnorm_q(self.proj_heads_q(x).reshape(s)).to(self.dtype).permute([0, 2, 1, 3]) + ks = self.lnorm_k(self.proj_heads_k(x).reshape(s)).to(self.dtype).permute([0, 2, 1, 3]) + vs = self.proj_heads_v(x).reshape(s).permute([0, 2, 1, 3]) + + outs = self.flex_attention(qs, ks, vs, block_mask=self.block_mask).transpose(1, 2) + + out = self.proj_out(self.dropout(outs.flatten(-2, -1))) + if self.with_residual: + out = x_in + out + + return out + + +class MultiCrossAttentionHeadVarlen(torch.nn.Module): + def __init__( + self, + dim_embed_q, + dim_embed_kv, + num_heads, + dim_head_proj=None, + dropout_rate=0.0, + with_residual=True, + with_qk_lnorm=True, + with_flash=True, + norm_type="LayerNorm", + softcap=0.0, + dim_aux=None, + norm_eps=1e-5, + attention_dtype=torch.bfloat16, + ): + super(MultiCrossAttentionHeadVarlen, self).__init__() + + self.num_heads = num_heads + self.dropout_rate = dropout_rate + self.with_residual = with_residual + self.with_flash = with_flash + self.softcap = softcap + + if norm_type == "LayerNorm": + norm = partial(torch.nn.LayerNorm, elementwise_affine=False, eps=norm_eps) + else: + norm = RMSNorm + + self.dim_head_proj = dim_embed_q // num_heads if dim_head_proj is None else dim_head_proj + + if dim_aux is not None: + self.lnorm_in_q = AdaLayerNorm(dim_embed_q, dim_aux, norm_eps=norm_eps) + else: + self.lnorm_in_q = norm(dim_embed_q, eps=norm_eps) + self.lnorm_in_kv = norm(dim_embed_kv, eps=norm_eps) + + self.proj_heads_q = torch.nn.Linear(dim_embed_q, num_heads * self.dim_head_proj, bias=False) + self.proj_heads_k = torch.nn.Linear( + dim_embed_kv, num_heads * self.dim_head_proj, bias=False + ) + self.proj_heads_v = torch.nn.Linear( + dim_embed_kv, num_heads * self.dim_head_proj, bias=False + ) + + self.proj_out = torch.nn.Linear(self.dim_head_proj * num_heads, dim_embed_q, bias=False) + self.dropout = ( + torch.nn.Dropout(p=dropout_rate) if dropout_rate > 0.0 else torch.nn.Identity() + ) + + lnorm = norm if with_qk_lnorm else torch.nn.Identity + self.lnorm_q = lnorm(self.dim_head_proj, eps=norm_eps) + self.lnorm_k = lnorm(self.dim_head_proj, eps=norm_eps) + + self.dtype = attention_dtype + assert with_flash, "Only flash attention supported at the moment" + + def forward(self, x_q, x_kv, x_q_lens=None, x_kv_lens=None, ada_ln_aux=None): + if self.with_residual: + x_q_in = x_q + x_q = self.lnorm_in_q(x_q) if ada_ln_aux is None else self.lnorm_in_q(x_q, ada_ln_aux) + x_kv = self.lnorm_in_kv(x_kv) + + # project onto heads and q,k,v and + # ensure these are 4D tensors as required for flash attention + s = [x_q.shape[0], self.num_heads, self.dim_head_proj] + qs = self.lnorm_q(self.proj_heads_q(x_q).reshape(s)).to(self.dtype) + s = [x_kv.shape[0], self.num_heads, self.dim_head_proj] + ks = self.lnorm_k(self.proj_heads_k(x_kv).reshape(s)).to(self.dtype) + vs = self.proj_heads_v(x_kv).reshape(s) + + # set dropout rate according to training/eval mode as required by flash_attn + dropout_rate = self.dropout_rate 
if self.training else 0.0 + + if x_kv_lens is not None: + cum_x_q_lens = torch.cumsum(x_q_lens, 0, dtype=torch.int32) + cum_x_kv_lens = torch.cumsum(x_kv_lens, 0, dtype=torch.int32) + outs = flash_attn_varlen_func( + qs, + ks, + vs, + cum_x_q_lens, + cum_x_kv_lens, + x_q_lens.max(), + x_kv_lens.max(), + softcap=self.softcap, + dropout_p=dropout_rate, + ) + else: + assert False + + outs = self.proj_out(outs.flatten(-2, -1)) + if self.with_residual: + outs = x_q_in + outs + + return outs + + +class MultiCrossAttentionHeadVarlenSlicedQ(torch.nn.Module): + def __init__( + self, + dim_embed_q, + dim_embed_kv, + num_slices_q, + num_heads, + dim_head_proj=None, + dropout_rate=0.0, + with_residual=True, + with_qk_lnorm=True, + with_flash=True, + norm_type="LayerNorm", + softcap=0.0, + dim_aux=None, + norm_eps=1e-5, + attention_dtype=torch.bfloat16, + ): + super(MultiCrossAttentionHeadVarlenSlicedQ, self).__init__() + + self.num_slices_q = num_slices_q + self.num_heads = num_heads + self.dropout_rate = dropout_rate + self.with_residual = with_residual + self.with_flash = with_flash + self.softcap = softcap + + if norm_type == "LayerNorm": + norm = partial(torch.nn.LayerNorm, elementwise_affine=False, eps=norm_eps) + else: + norm = RMSNorm + + self.dim_head_proj = dim_embed_q // num_heads if dim_head_proj is None else dim_head_proj + + if dim_aux is not None: + self.lnorm_in_q = AdaLayerNorm(dim_embed_q, dim_aux, norm_eps=norm_eps) + else: + self.lnorm_in_q = norm(dim_embed_q, eps=norm_eps) + self.lnorm_in_kv = norm(dim_embed_kv, eps=norm_eps) + + assert num_heads % num_slices_q == 0 + num_heads_r = num_heads + self.proj_heads_q = torch.nn.ModuleList() + for _ in range(num_slices_q): + self.proj_heads_q.append( + torch.nn.Linear(dim_embed_q, num_heads_r * self.dim_head_proj, bias=False) + ) + self.proj_heads_k = torch.nn.Linear( + dim_embed_kv, num_heads_r * self.dim_head_proj, bias=False + ) + self.proj_heads_v = torch.nn.Linear( + dim_embed_kv, num_heads_r * self.dim_head_proj, bias=False + ) + + self.proj_out = torch.nn.Linear(self.dim_head_proj * num_heads, dim_embed_q, bias=False) + self.dropout = ( + torch.nn.Dropout(p=dropout_rate) if dropout_rate > 0.0 else torch.nn.Identity() + ) + + lnorm = norm if with_qk_lnorm else torch.nn.Identity + self.lnorm_q = lnorm(self.dim_head_proj, eps=norm_eps) + self.lnorm_k = lnorm(self.dim_head_proj, eps=norm_eps) + + self.dtype = attention_dtype + assert with_flash, "Only flash attention supported at the moment" + + def forward(self, x_q, x_kv, x_q_lens=None, x_kv_lens=None, ada_ln_aux=None): + if self.with_residual: + x_q_in = x_q + x_q = self.lnorm_in_q(x_q) if ada_ln_aux is None else self.lnorm_in_q(x_q, ada_ln_aux) + x_kv = self.lnorm_in_kv(x_kv) + + # project onto heads and q,k,v and + # ensure these are 4D tensors as required for flash attention + s = [x_q.shape[0], self.num_heads, self.dim_head_proj] + qs = [ + self.lnorm_q(head_proj(x_q_i).reshape(s)).to(self.dtype) + for head_proj, x_q_i in zip(self.proj_heads_q, x_q.transpose(1, 0), strict=False) + ] + s = [x_kv.shape[0], self.num_heads, self.dim_head_proj] + ks = self.lnorm_k(self.proj_heads_k(x_kv).reshape(s)).to(self.dtype) + vs = self.proj_heads_v(x_kv).reshape(s) + + # set dropout rate according to training/eval mode as required by flash_attn + dropout_rate = self.dropout_rate if self.training else 0.0 + + cum_x_q_lens = torch.cumsum(x_q_lens, 0, dtype=torch.int32) + cum_x_kv_lens = torch.cumsum(x_kv_lens, 0, dtype=torch.int32) + outs = [] + for _i, qs_i in enumerate(qs): + outs += [ + 
flash_attn_varlen_func( + qs_i, + ks, + vs, + cum_x_q_lens, + cum_x_kv_lens, + x_q_lens.max(), + x_kv_lens.max(), + softcap=self.softcap, + dropout_p=dropout_rate, + ) + ] + + outs = self.proj_out(torch.stack(outs).transpose(1, 0).flatten(-2, -1)) + if self.with_residual: + outs = x_q_in + outs.reshape(x_q_in.shape) + + return outs - lnorm = norm if with_qk_lnorm else torch.nn.Identity - self.lnorm_q = lnorm( self.dim_head_proj) - self.lnorm_k = lnorm( self.dim_head_proj) - assert with_flash, 'Only flash attention supported at the moment' - - def att( qs, ks, vs, x_mask) : - - def sparsity_mask( score, b, h, q_idx, kv_idx): - # return x_mask[q_idx] == x_mask[kv_idx] - return (q_idx // 16) == (kv_idx % 16) - - return flex_attention( qs, ks, vs, score_mod=sparsity_mask) - self.compiled_flex_attention = torch.compile( att, dynamic=False) - - ######################################### - def forward( self, x, x_lens = None) : - - x_in = x - x = self.lnorm( x) - - # project onto heads and q,k,v and ensure these are 4D tensors as required for flash attention - s = [ x.shape[0], 1, self.num_heads, -1] - qs = self.lnorm_q( self.proj_heads_q( x).reshape(s)).to(torch.float16).permute( [1, 2, 0, 3]) - ks = self.lnorm_k( self.proj_heads_k( x).reshape(s)).to(torch.float16).permute( [1, 2, 0, 3]) - vs = self.proj_heads_v( x).reshape(s).permute( [1, 2, 0, 3]) - - outs = self.compiled_flex_attention( qs, ks, vs).transpose(1,2).squeeze() - - return x_in + self.dropout( self.proj_out( outs.flatten( -2, -1)) ) - -#################################################################################################### -class MultiSelfAttentionHead_Local(torch.nn.Module): - - ######################################### - def __init__(self, dim_embed, num_heads, qkv_len, block_factor, - dim_head_proj=None, dropout_rate=0., - with_qk_lnorm=True, with_flash=True, norm_type = 'LayerNorm', - softcap=0.0, dim_aux=None) : - - super(MultiSelfAttentionHead_Local, self).__init__() - - self.num_heads = num_heads - self.with_flash = with_flash - self.softcap = softcap - - assert 0 == dim_embed % num_heads - self.dim_head_proj = dim_embed // num_heads if dim_head_proj is None else dim_head_proj - - if norm_type == 'LayerNorm' : - norm = partial( torch.nn.LayerNorm, elementwise_affine=False) - else : - norm = RMSNorm - - if dim_aux is not None : - self.lnorm = AdaLayerNorm( dim_embed, dim_aux) - else : - self.lnorm = norm( dim_embed) - self.proj_heads_q = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False) - self.proj_heads_k = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False) - self.proj_heads_v = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False) - self.proj_out = torch.nn.Linear( dim_embed, dim_embed, bias = False) - self.dropout = torch.nn.Dropout( p=dropout_rate) if dropout_rate > 0. else torch.nn.Identity() - - lnorm = norm if with_qk_lnorm else torch.nn.Identity - self.lnorm_q = lnorm( self.dim_head_proj) - self.lnorm_k = lnorm( self.dim_head_proj) - - assert with_flash, 'Only flash attention supported.' 
- - # define block mask - def mask_block_local( batch, head, idx_q, idx_kv): - return (idx_q // block_factor) == (idx_kv // block_factor) - self.block_mask = create_block_mask( mask_block_local, B=None, H=None, - Q_LEN=qkv_len, KV_LEN=qkv_len) - # compile for efficiency - self.flex_attention = torch.compile( flex_attention, dynamic=False) - - ######################################### - def forward( self, x, ada_ln_aux=None) : - - x_in = x - x = self.lnorm( x) if ada_ln_aux is None else self.lnorm( x, ada_ln_aux) - - # project onto heads - s = [ x.shape[0], x.shape[1], self.num_heads, -1] - qs = self.lnorm_q( self.proj_heads_q( x).reshape(s)).to(torch.float16).permute( [0, 2, 1, 3]) - ks = self.lnorm_k( self.proj_heads_k( x).reshape(s)).to(torch.float16).permute( [0, 2, 1, 3]) - vs = self.proj_heads_v( x).reshape(s).permute( [0, 2, 1, 3]) - - outs = self.flex_attention( qs, ks, vs, block_mask=self.block_mask).transpose(1,2) - - return x_in + self.proj_out( self.dropout( outs.flatten( -2, -1)) ) - -#################################################################################################### -class MultiCrossAttentionHead_Varlen(torch.nn.Module): - - ######################################### - def __init__(self, dim_embed_q, dim_embed_kv, num_heads, dim_head_proj=None, - dropout_rate=0., with_residual=True, with_qk_lnorm=True, with_flash=True, - norm_type = 'LayerNorm', softcap=0., dim_aux=None) : - - super(MultiCrossAttentionHead_Varlen, self).__init__() - - self.num_heads = num_heads - self.dropout_rate = dropout_rate - self.with_residual = with_residual - self.with_flash = with_flash - self.softcap = softcap - - if norm_type == 'LayerNorm' : - norm = partial( torch.nn.LayerNorm, elementwise_affine=False) - elif norm_type == 'RMSNorm' : - norm = RMSNorm - else : - assert False, 'Unsupported norm type.' - - self.dim_head_proj = dim_embed_q // num_heads if dim_head_proj is None else dim_head_proj - - if dim_aux is not None : - self.lnorm_in_q = AdaLayerNorm( dim_embed_q, dim_aux) - else : - self.lnorm_in_q = norm( dim_embed_q) - self.lnorm_in_kv = norm( dim_embed_kv) - - self.proj_heads_q = torch.nn.Linear( dim_embed_q, num_heads*self.dim_head_proj, bias=False) - self.proj_heads_k = torch.nn.Linear( dim_embed_kv, num_heads*self.dim_head_proj, bias=False) - self.proj_heads_v = torch.nn.Linear( dim_embed_kv, num_heads*self.dim_head_proj, bias=False) - - self.proj_out = torch.nn.Linear( self.dim_head_proj*num_heads, dim_embed_q, bias=False) - self.dropout = torch.nn.Dropout( p=dropout_rate) if dropout_rate > 0. 
else torch.nn.Identity() - - lnorm = norm if with_qk_lnorm else torch.nn.Identity - self.lnorm_q = lnorm( self.dim_head_proj) - self.lnorm_k = lnorm( self.dim_head_proj) - - assert with_flash, 'Only flash attention supported at the moment' - - ######################################### - def forward( self, x_q, x_kv, x_q_lens=None, x_kv_lens=None, ada_ln_aux=None) : - - if self.with_residual : - x_q_in = x_q - x_q = self.lnorm_in_q( x_q) if ada_ln_aux is None else self.lnorm_in_q( x_q, ada_ln_aux) - x_kv = self.lnorm_in_kv( x_kv) - - # project onto heads and q,k,v and ensure these are 4D tensors as required for flash attention - s = [ x_q.shape[0], self.num_heads, self.dim_head_proj] - qs = self.lnorm_q( self.proj_heads_q( x_q).reshape(s)).to(torch.float16) - s = [ x_kv.shape[0], self.num_heads, self.dim_head_proj ] - ks = self.lnorm_k( self.proj_heads_k( x_kv).reshape(s)).to(torch.float16) - vs = self.proj_heads_v( x_kv).reshape(s) - - if x_kv_lens is not None : - cum_x_q_lens = torch.cumsum( x_q_lens, 0, dtype=torch.int32) - cum_x_kv_lens = torch.cumsum( x_kv_lens, 0, dtype=torch.int32) - outs = flash_attn_varlen_func( qs, ks, vs, cum_x_q_lens, cum_x_kv_lens, - x_q_lens.max(), x_kv_lens.max(), - softcap=self.softcap, - dropout_p=self.dropout_rate) - else : - assert False - - # outs = self.dropout( self.proj_out( outs.flatten( -2, -1)) ) - outs = self.proj_out( outs.flatten( -2, -1)) - if self.with_residual : - outs = x_q_in + outs - - return outs - - ######################################### - def attention( self, q, k, v) : - scaling = 1. / torch.sqrt( torch.tensor(q.shape[-1])) - return torch.matmul( self.softmax( scaling * self.score( q, k)), v) - - ######################################### - def score( self, q, k) : - return torch.matmul( q, torch.transpose( k, -2, -1)) - -#################################################################################################### -class MultiCrossAttentionHead_Varlen_SlicedQ(torch.nn.Module): - - ######################################### - def __init__(self, dim_embed_q, dim_embed_kv, num_slices_q, num_heads, dim_head_proj=None, - dropout_rate=0., with_residual=True, with_qk_lnorm=True, with_flash=True, - norm_type = 'LayerNorm', softcap=0., dim_aux=None) : - - super(MultiCrossAttentionHead_Varlen_SlicedQ, self).__init__() - - self.num_slices_q = num_slices_q - self.num_heads = num_heads - self.dropout_rate = dropout_rate - self.with_residual = with_residual - self.with_flash = with_flash - self.softcap = softcap - - if norm_type == 'LayerNorm' : - norm = partial( torch.nn.LayerNorm, elementwise_affine=False) - elif norm_type == 'RMSNorm' : - norm = RMSNorm - else : - assert False, 'Unsupported norm type.' 
- - self.dim_head_proj = dim_embed_q // num_heads if dim_head_proj is None else dim_head_proj - - if dim_aux is not None : - self.lnorm_in_q = AdaLayerNorm( dim_embed_q, dim_aux) - else : - self.lnorm_in_q = norm( dim_embed_q) - self.lnorm_in_kv = norm( dim_embed_kv) - - assert num_heads % num_slices_q == 0 - num_heads_r = num_heads - self.proj_heads_q = torch.nn.ModuleList() - for _ in range( num_slices_q) : - self.proj_heads_q.append( torch.nn.Linear( dim_embed_q, num_heads_r*self.dim_head_proj, bias=False)) - self.proj_heads_k = torch.nn.Linear( dim_embed_kv, num_heads_r*self.dim_head_proj, bias=False) - self.proj_heads_v = torch.nn.Linear( dim_embed_kv, num_heads_r*self.dim_head_proj, bias=False) - - self.proj_out = torch.nn.Linear( self.dim_head_proj*num_heads, dim_embed_q, bias=False) - self.dropout = torch.nn.Dropout( p=dropout_rate) if dropout_rate > 0. else torch.nn.Identity() - - lnorm = norm if with_qk_lnorm else torch.nn.Identity - self.lnorm_q = lnorm( self.dim_head_proj) - self.lnorm_k = lnorm( self.dim_head_proj) - - assert with_flash, 'Only flash attention supported at the moment' - - ######################################### - def forward( self, x_q, x_kv, x_q_lens=None, x_kv_lens=None, ada_ln_aux=None) : - - if self.with_residual : - x_q_in = x_q - x_q = self.lnorm_in_q( x_q) if ada_ln_aux is None else self.lnorm_in_q( x_q, ada_ln_aux) - x_kv = self.lnorm_in_kv( x_kv) - - # project onto heads and q,k,v and ensure these are 4D tensors as required for flash attention - s = [ x_q.shape[0], self.num_heads, self.dim_head_proj] - qs = [self.lnorm_q( head_proj( x_q_i).reshape(s)).to(torch.float16) - for head_proj,x_q_i in zip( self.proj_heads_q, x_q.transpose(1,0))] - s = [ x_kv.shape[0], self.num_heads, self.dim_head_proj ] - ks = self.lnorm_k( self.proj_heads_k( x_kv).reshape(s)).to(torch.float16) - vs = self.proj_heads_v( x_kv).reshape(s) - - cum_x_q_lens = torch.cumsum( x_q_lens, 0, dtype=torch.int32) - cum_x_kv_lens = torch.cumsum( x_kv_lens, 0, dtype=torch.int32) - outs = [] - for i,qs_i in enumerate(qs) : - outs += [ flash_attn_varlen_func( qs_i, ks, vs, cum_x_q_lens, cum_x_kv_lens, - x_q_lens.max(), x_kv_lens.max(), - softcap=self.softcap, - dropout_p=self.dropout_rate) ] - - # outs = self.dropout( self.proj_out( torch.stack(outs).transpose(1,0).flatten( -2, -1)) ) - outs = self.proj_out( torch.stack(outs).transpose(1,0).flatten( -2, -1)) - if self.with_residual : - outs = x_q_in + outs.reshape(x_q_in.shape) - - return outs - -#################################################################################################### class MultiSelfAttentionHead(torch.nn.Module): + def __init__( + self, + dim_embed, + num_heads, + dim_head_proj=None, + dropout_rate=0.0, + with_residual=True, + with_qk_lnorm=True, + with_flash=True, + softcap=0.0, + norm_type="LayerNorm", + dim_aux=None, + norm_eps=1e-5, + attention_dtype=torch.bfloat16, + ): + super(MultiSelfAttentionHead, self).__init__() + + self.num_heads = num_heads + self.with_flash = with_flash + self.softcap = softcap + self.dropout_rate = dropout_rate + self.with_residual = with_residual + + assert dim_embed % num_heads == 0 + self.dim_head_proj = dim_embed // num_heads if dim_head_proj is None else dim_head_proj + + if norm_type == "LayerNorm": + norm = partial(torch.nn.LayerNorm, elementwise_affine=False, eps=norm_eps) + else: + norm = RMSNorm + + if dim_aux is not None: + self.lnorm = AdaLayerNorm(dim_embed, dim_aux, norm_eps=norm_eps) + else: + self.lnorm = norm(dim_embed, eps=norm_eps) + self.proj_heads_q = 
torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False)
+        self.proj_heads_k = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False)
+        self.proj_heads_v = torch.nn.Linear(dim_embed, num_heads * self.dim_head_proj, bias=False)
+        self.proj_out = torch.nn.Linear(dim_embed, dim_embed, bias=False)
+        self.dropout = (
+            torch.nn.Dropout(p=dropout_rate) if dropout_rate > 0.0 else torch.nn.Identity()
+        )
+
+        lnorm = norm if with_qk_lnorm else torch.nn.Identity
+        self.lnorm_q = lnorm(self.dim_head_proj, eps=norm_eps)
+        self.lnorm_k = lnorm(self.dim_head_proj, eps=norm_eps)
+
+        self.dtype = attention_dtype
+        if with_flash:
+            self.att = torch.nn.functional.scaled_dot_product_attention
+        else:
+            self.att = self.attention
+            self.softmax = torch.nn.Softmax(dim=-1)
+
+    def forward(self, x, ada_ln_aux=None):
+        if self.with_residual:
+            x_in = x
+        x = self.lnorm(x) if ada_ln_aux is None else self.lnorm(x, ada_ln_aux)
+
+        # project onto heads and q,k,v and
+        # ensure these are 4D tensors as required for flash attention
+        s = [*([x.shape[0], 1] if len(x.shape) == 2 else x.shape[:-1]), self.num_heads, -1]
+        qs = self.lnorm_q(self.proj_heads_q(x).reshape(s)).to(self.dtype)
+        ks = self.lnorm_k(self.proj_heads_k(x).reshape(s)).to(self.dtype)
+        vs = self.proj_heads_v(x).reshape(s).to(self.dtype)
+
+        # set dropout rate according to training/eval mode as required by flash_attn
+        dropout_rate = self.dropout_rate if self.training else 0.0
+
+        # ordering of tensors is (seq, heads, embed), which differs from torch's
+        # flash attention implementation
+        outs = flash_attn_func(qs, ks, vs, softcap=self.softcap, dropout_p=dropout_rate)
+
+        out = self.proj_out(outs.flatten(-2, -1))
+        if self.with_residual:
+            out = out + x_in
+
+        return out

-  #########################################
-  def __init__(self, dim_embed, num_heads, dim_head_proj=None, dropout_rate=0.,
-               with_qk_lnorm=True, with_flash=True, norm_type = 'LayerNorm', dim_aux=None) :
-
-    super(MultiSelfAttentionHead, self).__init__()
-
-    self.num_heads = num_heads
-    self.with_flash = with_flash
-    self.dropout_rate = dropout_rate
-
-    assert 0 == dim_embed % num_heads
-    self.dim_head_proj = dim_embed // num_heads if dim_head_proj is None else dim_head_proj
-
-    if norm_type == 'LayerNorm' :
-      norm = partial( torch.nn.LayerNorm, elementwise_affine=False)
-    else :
-      norm = RMSNorm
-
-    if dim_aux is not None :
-      self.lnorm = AdaLayerNorm( dim_embed, dim_aux)
-    else :
-      self.lnorm = norm( dim_embed)
-    self.proj_heads_q = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False)
-    self.proj_heads_k = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False)
-    self.proj_heads_v = torch.nn.Linear( dim_embed, num_heads*self.dim_head_proj, bias = False)
-    self.proj_out = torch.nn.Linear( dim_embed, dim_embed, bias = False)
-    self.dropout = torch.nn.Dropout( p=dropout_rate) if dropout_rate > 0.
else torch.nn.Identity() - - lnorm = norm if with_qk_lnorm else torch.nn.Identity - self.lnorm_q = lnorm( self.dim_head_proj) - self.lnorm_k = lnorm( self.dim_head_proj) - - if with_flash : - self.att = torch.nn.functional.scaled_dot_product_attention - else : - self.att = self.attention - self.softmax = torch.nn.Softmax(dim=-1) - - ######################################### - def forward( self, x, ada_ln_aux=None) : - - x_in = x - # x = self.lnorm( x) - x = self.lnorm( x) if ada_ln_aux is None else self.lnorm( x, ada_ln_aux) - - # project onto heads and q,k,v and ensure these are 4D tensors as required for flash attention - s = [ *([x.shape[0],1] if len(x.shape)==2 else x.shape[:-1]), self.num_heads, -1] - qs = self.lnorm_q( self.proj_heads_q( x).reshape(s)).to(torch.float16) - ks = self.lnorm_k( self.proj_heads_k( x).reshape(s)).to(torch.float16) - vs = self.proj_heads_v( x).reshape(s) - - # ordering of tensors (seq, heads, embed) (which differs from torch's flash attention implt) - outs = flash_attn_func( qs, ks, vs, dropout_p=self.dropout_rate) - - # return x_in + self.dropout( self.proj_out( outs.flatten( -2, -1)) ) - return x_in + self.proj_out( outs.flatten( -2, -1)) - - ######################################### - def attention( self, q, k, v) : - scaling = 1. / torch.sqrt( torch.tensor(q.shape[-1])) - return torch.matmul( self.softmax( scaling * self.score( q, k)), v) - - ######################################### - def score( self, q, k) : - return torch.matmul( q, torch.transpose( k, -2, -1)) - -#################################################################################################### -class MultiCrossAttentionHead(torch.nn.Module): - ######################################### - def __init__(self, dim_embed_q, dim_embed_kv, num_heads, dim_head_proj=None, - dropout_rate=0., with_residual=True, with_qk_lnorm=True, with_flash=True, - norm_type = 'LayerNorm') : - - super(MultiCrossAttentionHead, self).__init__() - - self.num_heads = num_heads - self.with_residual = with_residual - self.with_flash = with_flash - - if norm_type == 'LayerNorm' : - norm = partial( torch.nn.LayerNorm, elementwise_affine=False) - else : - norm = RMSNorm - - assert 0 == dim_embed_q % num_heads - self.dim_head_proj = dim_embed_q // num_heads if dim_head_proj is None else dim_head_proj - - self.lnorm_in_q = norm( dim_embed_q) - self.lnorm_in_kv = norm( dim_embed_kv) - - self.proj_heads_q = torch.nn.Linear( dim_embed_q, num_heads*self.dim_head_proj, bias=False) - self.proj_heads_k = torch.nn.Linear( dim_embed_kv, num_heads*self.dim_head_proj, bias=False) - self.proj_heads_v = torch.nn.Linear( dim_embed_kv, num_heads*self.dim_head_proj, bias=False) - - self.proj_out = torch.nn.Linear( self.dim_head_proj*num_heads, dim_embed_q, bias=False) - self.dropout = torch.nn.Dropout( p=dropout_rate) if dropout_rate > 0. 
else torch.nn.Identity() - - lnorm = norm if with_qk_lnorm else torch.nn.Identity - self.lnorm_q = lnorm( self.dim_head_proj) - self.lnorm_k = lnorm( self.dim_head_proj) - - self.att = torch.nn.functional.scaled_dot_product_attention - self.softmax = torch.nn.Softmax(dim=-1) - - ######################################### - def forward( self, x_q, x_kv) : - - if self.with_residual : - x_q_in = x_q - x_q, x_kv = self.lnorm_in_q( x_q), self.lnorm_in_kv( x_kv) - - # project onto heads and q,k,v and ensure these are 4D tensors as required for flash attention - s = [ x_q.shape[0], -1, self.num_heads, self.dim_head_proj] - qs = self.lnorm_q( self.proj_heads_q( x_q).reshape(s)).to(torch.float16).transpose( -3, -2) - s = [ x_kv.shape[0], -1, self.num_heads, self.dim_head_proj ] - ks = self.lnorm_k( self.proj_heads_k( x_kv).reshape(s)).to(torch.float16).transpose( -3, -2) - vs = self.proj_heads_v( x_kv).reshape(s).transpose( -3, -2) - - # correct ordering of tensors with seq dimension second but last is critical - with torch.nn.attention.sdpa_kernel( torch.nn.attention.SDPBackend.FLASH_ATTENTION) : - outs = self.att( qs, ks, vs).transpose( 2, 1) - - outs = self.dropout( self.proj_out( outs.flatten( -2, -1)) ) - if self.with_residual : - outs = x_q_in + outs - - return outs - - ######################################### - def attention( self, q, k, v) : - scaling = 1. / torch.sqrt( torch.tensor(q.shape[-1])) - return torch.matmul( self.softmax( scaling * self.score( q, k)), v) - - ######################################### - def score( self, q, k) : - return torch.matmul( q, torch.transpose( k, -2, -1)) +class MultiCrossAttentionHead(torch.nn.Module): + def __init__( + self, + dim_embed_q, + dim_embed_kv, + num_heads, + dim_head_proj=None, + dropout_rate=0.0, + with_residual=True, + with_qk_lnorm=True, + with_flash=True, + norm_type="LayerNorm", + norm_eps=1e-5, + attention_dtype=torch.bfloat16, + ): + super(MultiCrossAttentionHead, self).__init__() + + self.num_heads = num_heads + self.with_residual = with_residual + self.with_flash = with_flash + + if norm_type == "LayerNorm": + norm = partial(torch.nn.LayerNorm, elementwise_affine=False, eps=norm_eps) + else: + norm = RMSNorm + + assert dim_embed_q % num_heads == 0 + self.dim_head_proj = dim_embed_q // num_heads if dim_head_proj is None else dim_head_proj + + self.lnorm_in_q = norm(dim_embed_q, eps=norm_eps) + self.lnorm_in_kv = norm(dim_embed_kv, eps=norm_eps) + + self.proj_heads_q = torch.nn.Linear(dim_embed_q, num_heads * self.dim_head_proj, bias=False) + self.proj_heads_k = torch.nn.Linear( + dim_embed_kv, num_heads * self.dim_head_proj, bias=False + ) + self.proj_heads_v = torch.nn.Linear( + dim_embed_kv, num_heads * self.dim_head_proj, bias=False + ) + + self.proj_out = torch.nn.Linear(self.dim_head_proj * num_heads, dim_embed_q, bias=False) + self.dropout = ( + torch.nn.Dropout(p=dropout_rate) if dropout_rate > 0.0 else torch.nn.Identity() + ) + + lnorm = norm if with_qk_lnorm else torch.nn.Identity + self.lnorm_q = lnorm(self.dim_head_proj, eps=norm_eps) + self.lnorm_k = lnorm(self.dim_head_proj, eps=norm_eps) + + self.dtype = attention_dtype + self.att = torch.nn.functional.scaled_dot_product_attention + self.softmax = torch.nn.Softmax(dim=-1) + + ######################################### + def forward(self, x_q, x_kv): + if self.with_residual: + x_q_in = x_q + x_q, x_kv = self.lnorm_in_q(x_q), self.lnorm_in_kv(x_kv) + + # project onto heads and q,k,v and + # ensure these are 4D tensors as required for flash attention + s = [x_q.shape[0], 
-1, self.num_heads, self.dim_head_proj]
+        qs = self.lnorm_q(self.proj_heads_q(x_q).reshape(s)).to(self.dtype).transpose(-3, -2)
+        s = [x_kv.shape[0], -1, self.num_heads, self.dim_head_proj]
+        ks = self.lnorm_k(self.proj_heads_k(x_kv).reshape(s)).to(self.dtype).transpose(-3, -2)
+        vs = self.proj_heads_v(x_kv).reshape(s).transpose(-3, -2)
+
+        # correct ordering of tensors with seq dimension second to last is critical
+        with torch.nn.attention.sdpa_kernel(torch.nn.attention.SDPBackend.FLASH_ATTENTION):
+            outs = self.att(qs, ks, vs).transpose(2, 1)
+
+        outs = self.dropout(self.proj_out(outs.flatten(-2, -1)))
+        if self.with_residual:
+            outs = x_q_in + outs
+
+        return outs
diff --git a/src/weathergen/model/blocks.py b/src/weathergen/model/blocks.py
new file mode 100644
index 000000000..061928f64
--- /dev/null
+++ b/src/weathergen/model/blocks.py
@@ -0,0 +1,259 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+
+import torch.nn as nn
+
+from weathergen.model.attention import (
+    MultiCrossAttentionHeadVarlen,
+    MultiSelfAttentionHeadVarlen,
+)
+from weathergen.model.layers import MLP
+from weathergen.model.norms import AdaLayerNormLayer
+from weathergen.utils.utils import get_dtype
+
+
+class SelfAttentionBlock(nn.Module):
+    """
+    A self attention block, i.e., adaptive layer norm with multi-head self attention and adaptive
+    layer norm with a FFN.
+    """
+
+    def __init__(self, dim, dim_aux, with_adanorm, num_heads, dropout_rate, **kwargs):
+        super().__init__()
+
+        self.with_adanorm = with_adanorm
+
+        self.mhsa = MultiSelfAttentionHeadVarlen(
+            dim_embed=dim,
+            num_heads=num_heads,
+            with_residual=False,
+            **kwargs["attention_kwargs"],
+        )
+        if self.with_adanorm:
+            self.mhsa_block = AdaLayerNormLayer(dim, dim_aux, self.mhsa, dropout_rate)
+        else:
+            self.ln_sa = nn.LayerNorm(dim, eps=kwargs["attention_kwargs"]["norm_eps"])
+            self.mhsa_block = lambda x, _, **kwargs: self.mhsa(self.ln_sa(x), **kwargs) + x
+
+        approx_gelu = lambda: nn.GELU(approximate="tanh")
+        self.mlp = MLP(
+            dim_in=dim,
+            dim_out=dim,
+            hidden_factor=4,
+            dropout_rate=0.1,
+            nonlin=approx_gelu,
+            with_residual=False,
+        )
+        if self.with_adanorm:
+            self.mlp_fn = lambda x, **kwargs: self.mlp(x)
+            self.mlp_block = AdaLayerNormLayer(dim, dim_aux, self.mlp_fn, dropout_rate)
+        else:
+            self.ln_mlp = nn.LayerNorm(dim, eps=kwargs["attention_kwargs"]["norm_eps"])
+            self.mlp_block = lambda x, _, **kwargs: self.mlp(self.ln_mlp(x), None, **kwargs) + x
+
+        self.initialise_weights()
+        if self.with_adanorm:
+            # Has to happen after the basic weight init to ensure it is zero!
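+            # (Presumably "it" refers to the adaLN modulation parameters: starting them
+            # at zero makes each residual branch act as the identity at initialisation,
+            # in the spirit of DiT's adaLN-Zero. Running this after _basic_init prevents
+            # the Xavier initialisation above from overwriting the zeros.)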
+            self.mhsa_block.initialise_weights()
+            self.mlp_block.initialise_weights()
+
+    def initialise_weights(self):
+        # Initialise transformer layers:
+        def _basic_init(module):
+            if isinstance(module, nn.Linear):
+                nn.init.xavier_uniform_(module.weight)
+                if module.bias is not None:
+                    nn.init.constant_(module.bias, 0)
+
+        self.apply(_basic_init)
+
+    def forward(self, x, x_lens, aux=None):
+        # we have aux_lens as arg to be consistent with the CrossAttentionBlock
+        assert self.with_adanorm ^ (aux is None), "Conditioning is not being used"
+        x = self.mhsa_block(x, aux, x_lens=x_lens)
+        x = self.mlp_block(x, aux)
+        return x
+
+
+class CrossAttentionBlock(nn.Module):
+    """
+    A cross attention block, i.e., adaptive layer norm with cross attention and adaptive layer norm
+    with a FFN.
+    """
+
+    def __init__(
+        self,
+        dim_q,
+        dim_kv,
+        dim_aux,
+        with_self_attn,
+        with_adanorm,
+        with_mlp,
+        num_heads,
+        dropout_rate,
+        **kwargs,
+    ):
+        super().__init__()
+
+        self.with_adanorm = with_adanorm
+        self.with_self_attn = with_self_attn
+        self.with_mlp = with_mlp
+
+        if with_self_attn:
+            self.mhsa = MultiSelfAttentionHeadVarlen(
+                dim_embed=dim_q,
+                num_heads=num_heads,
+                with_residual=False,
+                **kwargs["attention_kwargs"],
+            )
+            if self.with_adanorm:
+                self.mhsa_block = AdaLayerNormLayer(dim_q, dim_aux, self.mhsa, dropout_rate)
+            else:
+                self.ln_sa = nn.LayerNorm(dim_q, eps=kwargs["attention_kwargs"]["norm_eps"])
+                self.mhsa_block = lambda x, _, **kwargs: self.mhsa(self.ln_sa(x), **kwargs) + x
+
+        self.cross_attn = MultiCrossAttentionHeadVarlen(
+            dim_embed_q=dim_q,
+            dim_embed_kv=dim_kv,
+            num_heads=num_heads,
+            with_residual=False,
+            **kwargs["attention_kwargs"],
+        )
+        if self.with_adanorm:
+            self.cross_attn_block = AdaLayerNormLayer(dim_q, dim_aux, self.cross_attn, dropout_rate)
+        else:
+            self.ln_ca = nn.LayerNorm(dim_q, eps=kwargs["attention_kwargs"]["norm_eps"])
+            self.cross_attn_block = (
+                lambda x, _, **kwargs: self.cross_attn(self.ln_ca(x), **kwargs) + x
+            )
+
+        if self.with_mlp:
+            approx_gelu = lambda: nn.GELU(approximate="tanh")
+            self.mlp = MLP(
+                dim_in=dim_q,
+                dim_out=dim_q,
+                hidden_factor=4,
+                nonlin=approx_gelu,
+                with_residual=False,
+            )
+            if self.with_adanorm:
+                self.mlp_fn = lambda x, **kwargs: self.mlp(x)
+                self.mlp_block = AdaLayerNormLayer(dim_q, dim_aux, self.mlp_fn, dropout_rate)
+            else:
+                self.ln_mlp = nn.LayerNorm(dim_q, eps=kwargs["attention_kwargs"]["norm_eps"])
+                self.mlp_block = lambda x, _, **kwargs: self.mlp(self.ln_mlp(x)) + x
+        else:
+            self.mlp_block = lambda x, _, **kwargs: x
+
+        self.initialise_weights()
+        if self.with_adanorm:
+            # Has to happen after the basic weight init to ensure it is zero!
+ self.mhsa_block.initialise_weights() + self.cross_attn_block.initialise_weights() + self.mlp_block.initialise_weights() + + def initialise_weights(self): + # Initialise transformer layers: + def _basic_init(module): + if isinstance(module, nn.Linear): + nn.init.xavier_uniform_(module.weight) + if module.bias is not None: + nn.init.constant_(module.bias, 0) + + self.apply(_basic_init) + + def forward(self, x, x_kv, aux, x_kv_lens=None, x_lens=None): + x = self.cross_attn_block(x, aux, x_kv=x_kv, x_lens=x_lens, x_kv_lens=x_kv_lens) + if self.with_self_attn: + x = self.mhsa_block(x, aux, x_lens=x_lens) + x = self.mlp_block(x, aux, x_lens=x_lens) + return x + + +class OriginalPredictionBlock(nn.Module): + def __init__( + self, + config, + dim_in, + dim_out, + dim_kv, + dim_aux, + num_heads, + attention_kwargs, + tr_dim_head_proj, + tr_mlp_hidden_factor, + tro_type, + mlp_norm_eps=1e-6, + ): + super().__init__() + + self.cf = config + self.tro_type = tro_type + self.tr_dim_head_proj = tr_dim_head_proj + self.tr_mlp_hidden_factor = tr_mlp_hidden_factor + + self.block = nn.ModuleList() + + # Multi-Cross Attention Head + self.block.append( + MultiCrossAttentionHeadVarlen( + dim_in, + self.cf.ae_global_dim_embed, + self.cf.streams[0]["target_readout"]["num_heads"], + dim_head_proj=self.tr_dim_head_proj, + with_residual=True, + with_qk_lnorm=True, + dropout_rate=0.1, # Assuming dropout_rate is 0.1 + with_flash=self.cf.with_flash_attention, + norm_type=attention_kwargs["norm_type"], + softcap=0.0, + dim_aux=dim_aux, + norm_eps=attention_kwargs["norm_eps"], + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + + # Optional Self-Attention Head + if self.cf.pred_self_attention: + self.block.append( + MultiSelfAttentionHeadVarlen( + dim_in, + num_heads=self.cf.streams[0]["target_readout"]["num_heads"], + dropout_rate=0.1, # Assuming dropout_rate is 0.1 + with_qk_lnorm=True, + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + dim_aux=dim_aux, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + + # MLP Block + self.block.append( + MLP( + dim_in, + dim_out, + with_residual=True, + hidden_factor=self.tr_mlp_hidden_factor, + dropout_rate=0.1, # Assuming dropout_rate is 0.1 + norm_type=self.cf.norm_type, + dim_aux=(dim_aux if self.cf.pred_mlp_adaln else None), + norm_eps=self.cf.mlp_norm_eps, + ) + ) + + def forward(self, latent, output, coords, latent_lens, output_lens): + for layer in self.block: + if isinstance(layer, MultiCrossAttentionHeadVarlen): + output = layer(output, latent, output_lens, latent_lens, coords) + else: + output = layer(output, output_lens, coords) + return output diff --git a/src/weathergen/model/ema.py b/src/weathergen/model/ema.py new file mode 100644 index 000000000..7acbbf9f0 --- /dev/null +++ b/src/weathergen/model/ema.py @@ -0,0 +1,71 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
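Both blocks above defer their conditioning to AdaLayerNormLayer and re-run its weight init after the basic Xavier init so that the gating starts at zero. AdaLayerNormLayer itself is defined in weathergen.model.norms and is not shown in this patch; the class below is only an illustrative sketch of the assumed DiT-style adaLN-zero pattern, not the project's implementation:

    import torch
    import torch.nn as nn

    class AdaLNZeroSketch(nn.Module):
        def __init__(self, dim, dim_aux, layer):
            super().__init__()
            self.layer = layer
            self.norm = nn.LayerNorm(dim, elementwise_affine=False)
            self.mod = nn.Linear(dim_aux, 3 * dim)  # produces shift, scale, gate

        def initialise_weights(self):
            # zero init: the gate starts at 0, so the block is initially the identity
            nn.init.zeros_(self.mod.weight)
            nn.init.zeros_(self.mod.bias)

        def forward(self, x, aux, **kwargs):
            # x: (..., dim); aux: shaped so the modulation broadcasts against x
            shift, scale, gate = self.mod(aux).chunk(3, dim=-1)
            return x + gate * self.layer(self.norm(x) * (1 + scale) + shift, **kwargs)

Zero-initialising the modulation is what makes the second initialise_weights() call above mandatory: a plain Xavier pass over the whole module would otherwise overwrite the zeros.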
+
+
+import torch
+
+
+class EMAModel:
+    """
+    Taken and modified from https://github.com/NVlabs/edm2/tree/main
+    """
+
+    @torch.no_grad()
+    def __init__(
+        self,
+        model,
+        empty_model,
+        halflife_steps=float("inf"),
+        rampup_ratio=0.09,
+        is_model_sharded=False,
+    ):
+        self.original_model = model
+        self.halflife_steps = halflife_steps
+        self.rampup_ratio = rampup_ratio
+        self.ema_model = empty_model
+        self.is_model_sharded = is_model_sharded
+
+        self.reset()
+
+    @torch.no_grad()
+    def reset(self):
+        """
+        Reset the EMA model so that its weights match the current model.
+
+        It operates via the state_dict to be able to deal with sharded tensors in case
+        FSDP2 is used.
+        """
+        self.ema_model.to_empty(device="cuda")
+        maybe_sharded_sd = self.original_model.state_dict()
+        # loading via the state dict copies correctly, also for sharded tensors
+        self.ema_model.load_state_dict(maybe_sharded_sd, strict=True, assign=False)
+
+    @torch.no_grad()
+    def update(self, cur_step, batch_size):
+        halflife_steps = self.halflife_steps
+        if self.rampup_ratio is not None:
+            halflife_steps = min(halflife_steps, cur_step / 1e3 * self.rampup_ratio)
+        beta = 0.5 ** (batch_size / max(halflife_steps * 1e3, 1e-6))
+        for p_net, p_ema in zip(
+            self.original_model.parameters(), self.ema_model.parameters(), strict=True
+        ):
+            p_ema.lerp_(p_net, 1 - beta)
+
+    @torch.no_grad()
+    def forward_eval(self, *args, **kwargs):
+        self.ema_model.eval()
+        out = self.ema_model(*args, **kwargs)
+        self.ema_model.train()
+        return out
+
+    def state_dict(self):
+        return self.ema_model.state_dict()
+
+    def load_state_dict(self, state, **kwargs):
+        self.ema_model.load_state_dict(state, **kwargs)
diff --git a/src/weathergen/model/embeddings.py b/src/weathergen/model/embeddings.py
new file mode 100644
index 000000000..c9a7b456c
--- /dev/null
+++ b/src/weathergen/model/embeddings.py
@@ -0,0 +1,217 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
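The EMAModel.update above follows the EDM2 half-life schedule: beta is chosen so that, with halflife_steps measured in thousands of samples, the EMA forgets half of its current state after halflife_steps * 1e3 samples. A small standalone sketch of the arithmetic, with toy values that are not from the patch:

    import torch

    halflife_steps, batch_size = 10.0, 16  # assumed toy values
    beta = 0.5 ** (batch_size / (halflife_steps * 1e3))

    p_net = torch.ones(4)    # current model parameter, held fixed here
    p_ema = torch.zeros(4)   # EMA shadow parameter
    for _ in range(int(halflife_steps * 1e3 / batch_size)):
        p_ema.lerp_(p_net, 1 - beta)  # the same update as EMAModel.update
    print(p_ema)  # ~0.5 everywhere: half the gap is closed after one half-life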
+ +import numpy as np +import torch +from torch.utils.checkpoint import checkpoint + +from weathergen.model.attention import MultiSelfAttentionHead +from weathergen.model.layers import MLP + +# from weathergen.model.mlp import MLP +from weathergen.model.norms import RMSNorm +from weathergen.model.positional_encoding import positional_encoding_harmonic + + +class StreamEmbedTransformer(torch.nn.Module): + def __init__( + self, + mode, + num_tokens, + token_size, + num_channels, + dim_embed, + dim_out, + num_blocks, + num_heads, + dropout_rate=0.0, + norm_type="LayerNorm", + embed_size_centroids=64, + unembed_mode="full", + stream_name="stream_embed", + ): + """Constructor + + unembed_mode : { 'full' , 'block'} + full : monolithic (and correspondingly large) unembedding network that maps from + (num_tokens x dim_embed) to dim_out, allowing for mixing between channels/columns + block : per-channel/column unembedding network + (which is hence a block-sparse form of full) + """ + + super(StreamEmbedTransformer, self).__init__() + + self.name = f"StreamEmbedder_{stream_name}" + self.mode = mode + self.num_tokens = num_tokens + self.token_size = token_size + self.num_channels = num_channels + self.dim_in = token_size if mode == "channels" else num_channels + self.dim_embed = dim_embed + self.dim_out = dim_out + self.num_blocks = num_blocks + self.num_heads = num_heads + self.embed_size_centroids = embed_size_centroids + self.unembed_mode = unembed_mode + + norm = torch.nn.LayerNorm if norm_type == "LayerNorm" else RMSNorm + + self.layers = torch.nn.ModuleList() + for _ in range(self.num_blocks): + self.layers.append( + MultiSelfAttentionHead( + self.dim_embed, + self.num_heads, + dropout_rate=dropout_rate, + with_qk_lnorm=True, + with_flash=True, + ) + ) + self.layers.append( + MLP( + self.dim_embed, + self.dim_embed, + hidden_factor=2, + dropout_rate=dropout_rate, + with_residual=True, + ) + ) + + if mode == "channels": + self.embed = torch.nn.Linear(self.dim_in, self.dim_embed) + + if self.unembed_mode == "full": + self.ln_final = norm(num_channels * self.dim_embed, eps=1e-03) + self.unembed = torch.nn.Linear( + num_channels * self.dim_embed, + self.num_tokens * self.dim_out - embed_size_centroids, + ) + + elif self.unembed_mode == "block": + # modify embed_size_centroids to ensure no additional padding is needed + rem = (self.num_tokens * self.dim_out - embed_size_centroids) % num_channels + embed_size_centroids += rem + dim_out = (self.num_tokens * self.dim_out - embed_size_centroids) // num_channels + self.unembed = torch.nn.ModuleList( + [torch.nn.Linear(dim_embed, dim_out) for _ in range(num_channels)] + # [ + # torch.nn.Sequential( + # torch.nn.Linear(dim_embed, max(dim_embed//2,4*dim_out)), + # torch.nn.GELU(), + # torch.nn.Linear(max(dim_embed//2,4*dim_out), dim_out) + # ) for _ in range(num_channels) + # ] + ) + self.ln_final = torch.nn.ModuleList( + [norm(dim_embed, eps=1e-6) for _ in range(num_channels)] + ) + + else: + raise ValueError(f"Unknown unembed mode: {unembed_mode}") + + elif mode == "columns": + assert embed_size_centroids == 0 + self.embed = torch.nn.Linear(self.dim_in, self.dim_embed) + + assert self.unembed_mode == "block" # only supported mode at the moment + # padding needed if the unembedded columns cannot be concatenated to dim_out (e.g GPSRO) + self.pad = self.dim_out % token_size + self.out_pad = torch.nn.Parameter(torch.zeros(self.pad), requires_grad=False) + self.unembed = torch.nn.Linear( + self.dim_embed, + self.num_tokens * ((self.dim_out - 
embed_size_centroids) // token_size), + ) + self.ln_final = norm(dim_out, eps=1e-6) + + # TODO: factorization when sqrt is not int + dim1 = int(np.sqrt(dim_out)) + assert dim1 * dim1 == dim_out + self.unembed1 = torch.nn.Linear(self.dim_embed, dim1) + self.unembed_nonlin = torch.nn.GELU() + self.unembed2 = torch.nn.Linear(self.token_size, dim1) + + else: + raise ValueError(f"Unknown mode: {mode}") + + self.dropout_final = torch.nn.Dropout(0.1) + self.embed_centroids = torch.nn.Linear(5, embed_size_centroids) + + def forward_channels(self, x_in, centroids): + peh = positional_encoding_harmonic + + # embed provided input data + x = peh(checkpoint(self.embed, x_in.transpose(-2, -1), use_reentrant=False)) + + for layer in self.layers: + x = checkpoint(layer, x, use_reentrant=False) + + # read out + if self.unembed_mode == "full": + out = checkpoint(self.unembed, self.ln_final(x.flatten(-2, -1)), use_reentrant=False) + elif self.unembed_mode == "block": + out = [ + checkpoint(ue, ln(x[:, i]), use_reentrant=False) + for i, (ue, ln) in enumerate(zip(self.unembed, self.ln_final, strict=True)) + ] + out = torch.stack(out, dim=1).flatten(-2, -1) + else: + raise ValueError(f"Unknown unembed mode: {self.unembed_mode}") + + # append centroids + if self.embed_size_centroids > 0: + out = torch.cat([out, self.embed_centroids(centroids)], -1) + # if self.embed_size_centroids==0 and self.dim_out is not divisible by #channels with + # unembed_mode block then we need to pad to have the expected output shape + if out.shape[-1] < self.dim_out: + out = torch.nn.functional.pad(out, [0, self.dim_out - out.shape[-1]], value=0.0) + # final reshape + out = self.dropout_final(out.reshape(-1, self.num_tokens, self.dim_out)) + + return out + + def forward_columns(self, x_in, centroids): + # embed provided input data + x = positional_encoding_harmonic(checkpoint(self.embed, x_in, use_reentrant=False)) + + for layer in self.layers: + x = checkpoint(layer, x, use_reentrant=False) + + out = checkpoint(self.unembed1, x, use_reentrant=False) + out = self.unembed_nonlin(out) + out = checkpoint(self.unembed2, out.transpose(-2, -1), use_reentrant=False) + out = out.flatten(-2, -1).unsqueeze(1) + + # final normalize and dropout + out = self.dropout_final(self.ln_final(out)) + + return out.to(torch.float16) + + def forward(self, x_in, centroids): + if self.mode == "channels": + return self.forward_channels(x_in, centroids) + elif self.mode == "columns": + return self.forward_columns(x_in, centroids) + else: + raise ValueError(f"Unknown mode {self.mode}") + + +class StreamEmbedLinear(torch.nn.Module): + def __init__(self, dim_in, dim_out, stream_name="stream_embed"): + """Constructor""" + + super(StreamEmbedLinear, self).__init__() + + self.name = f"StreamEmbedder_{stream_name}" + self.layer = torch.nn.Linear(dim_in, dim_out) + + def forward(self, x): + # x = checkpoint( self.layer, x.flatten( -2, -1), use_reentrant=True) + x = self.layer(x.flatten(-2, -1)) + + return x diff --git a/src/weathergen/model/engines.py b/src/weathergen/model/engines.py new file mode 100644 index 000000000..7359d1403 --- /dev/null +++ b/src/weathergen/model/engines.py @@ -0,0 +1,734 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. 
+# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +import torch +import torch.nn as nn +from torch.utils.checkpoint import checkpoint + +from weathergen.common.config import Config +from weathergen.model.attention import ( + MultiCrossAttentionHeadVarlen, + MultiCrossAttentionHeadVarlenSlicedQ, + MultiSelfAttentionHead, + MultiSelfAttentionHeadLocal, + MultiSelfAttentionHeadVarlen, +) +from weathergen.model.blocks import CrossAttentionBlock, OriginalPredictionBlock, SelfAttentionBlock +from weathergen.model.embeddings import ( + StreamEmbedLinear, + StreamEmbedTransformer, +) +from weathergen.model.layers import MLP +from weathergen.model.utils import ActivationFactory +from weathergen.utils.utils import get_dtype + + +class EmbeddingEngine(torch.nn.Module): + name: "EmbeddingEngine" + + def __init__(self, cf: Config, sources_size) -> None: + """ + Initialize the EmbeddingEngine with the configuration. + + :param cf: Configuration object containing parameters for the engine. + :param sources_size: List of source sizes for each stream. + """ + super(EmbeddingEngine, self).__init__() + self.cf = cf + self.sources_size = sources_size # KCT:iss130, what is this? + self.embeds = torch.nn.ModuleList() + + for i, si in enumerate(self.cf.streams): + stream_name = si.get("name", i) + + if si.get("diagnostic", False) or self.sources_size[i] == 0: + self.embeds.append(torch.nn.Identity()) + continue + + if si["embed"]["net"] == "transformer": + self.embeds.append( + StreamEmbedTransformer( + mode=self.cf.embed_orientation, + num_tokens=si["embed"]["num_tokens"], + token_size=si["token_size"], + num_channels=self.sources_size[i], + dim_embed=si["embed"]["dim_embed"], + dim_out=self.cf.ae_local_dim_embed, + num_blocks=si["embed"]["num_blocks"], + num_heads=si["embed"]["num_heads"], + dropout_rate=self.cf.embed_dropout_rate, + norm_type=self.cf.norm_type, + embed_size_centroids=self.cf.embed_size_centroids, + unembed_mode=self.cf.embed_unembed_mode, + stream_name=stream_name, + ) + ) + elif si["embed"]["net"] == "linear": + self.embeds.append( + StreamEmbedLinear( + self.sources_size[i] * si["token_size"], + self.cf.ae_local_dim_embed, + stream_name=stream_name, + ) + ) + else: + raise ValueError("Unsupported embedding network type") + + def forward(self, streams_data, pe_embed, dtype, device): + source_tokens_lens = torch.stack( + [ + torch.stack( + [ + s.source_tokens_lens if len(s.source_tokens_lens) > 0 else torch.tensor([]) + for s in stl_b + ] + ) + for stl_b in streams_data + ] + ) + offsets_base = source_tokens_lens.sum(1).sum(0).cumsum(0) + + tokens_all = torch.empty( + (int(offsets_base[-1]), self.cf.ae_local_dim_embed), dtype=dtype, device=device + ) + + for _, sb in enumerate(streams_data): + for _, (s, embed) in enumerate(zip(sb, self.embeds, strict=False)): + if not s.source_empty(): + idxs = s.source_idxs_embed.to(device) + idxs_pe = s.source_idxs_embed_pe.to(device) + + # create full scatter index + # (there's no broadcasting which is likely highly inefficient) + idxs = idxs.unsqueeze(1).repeat((1, self.cf.ae_local_dim_embed)) + x_embed = embed(s.source_tokens_cells, s.source_centroids).flatten(0, 1) + # there's undocumented limitation in flash_attn that will make embed fail if + # #tokens is too large; code below is a work around + # x_embed = torch.cat( + # [ + # embed(s_c, c_c).flatten(0, 1) + # for s_c, c_c in zip( + # 
torch.split(s.source_tokens_cells, 49152), + # torch.split(s.source_centroids, 49152), + # ) + # ] + # ) + + # scatter write to reorder from per stream to per cell ordering + tokens_all.scatter_(0, idxs, x_embed + pe_embed[idxs_pe]) + return tokens_all + + +class LocalAssimilationEngine(torch.nn.Module): + name: "LocalAssimilationEngine" + + def __init__(self, cf: Config) -> None: + """ + Initialize the LocalAssimilationEngine with the configuration. + + :param cf: Configuration object containing parameters for the engine. + """ + super(LocalAssimilationEngine, self).__init__() + self.cf = cf + self.ae_local_blocks = torch.nn.ModuleList() + + for _ in range(self.cf.ae_local_num_blocks): + self.ae_local_blocks.append( + MultiSelfAttentionHeadVarlen( + self.cf.ae_local_dim_embed, + num_heads=self.cf.ae_local_num_heads, + dropout_rate=self.cf.ae_local_dropout_rate, + with_qk_lnorm=self.cf.ae_local_with_qk_lnorm, + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + self.ae_local_blocks.append( + MLP( + self.cf.ae_local_dim_embed, + self.cf.ae_local_dim_embed, + with_residual=True, + dropout_rate=self.cf.ae_local_dropout_rate, + norm_type=self.cf.norm_type, + norm_eps=self.cf.mlp_norm_eps, + ) + ) + + def forward(self, tokens_c, cell_lens_c, use_reentrant): + for block in self.ae_local_blocks: + tokens_c = checkpoint(block, tokens_c, cell_lens_c, use_reentrant=use_reentrant) + return tokens_c + + +class Local2GlobalAssimilationEngine(torch.nn.Module): + name: "Local2GlobalAssimilationEngine" + + def __init__(self, cf: Config) -> None: + """ + Initialize the Local2GlobalAssimilationEngine with the configuration. + + :param cf: Configuration object containing parameters for the engine. 
+ """ + super(Local2GlobalAssimilationEngine, self).__init__() + self.cf = cf + self.ae_adapter = torch.nn.ModuleList() + + self.ae_adapter.append( + MultiCrossAttentionHeadVarlenSlicedQ( + self.cf.ae_global_dim_embed, + self.cf.ae_local_dim_embed, + num_slices_q=self.cf.ae_local_num_queries, + dim_head_proj=self.cf.ae_adapter_embed, + num_heads=self.cf.ae_adapter_num_heads, + with_residual=self.cf.ae_adapter_with_residual, + with_qk_lnorm=self.cf.ae_adapter_with_qk_lnorm, + dropout_rate=self.cf.ae_adapter_dropout_rate, + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + self.ae_adapter.append( + MLP( + self.cf.ae_global_dim_embed, + self.cf.ae_global_dim_embed, + with_residual=True, + dropout_rate=self.cf.ae_adapter_dropout_rate, + norm_type=self.cf.norm_type, + norm_eps=self.cf.mlp_norm_eps, + ) + ) + self.ae_adapter.append( + MultiCrossAttentionHeadVarlenSlicedQ( + self.cf.ae_global_dim_embed, + self.cf.ae_local_dim_embed, + num_slices_q=self.cf.ae_local_num_queries, + dim_head_proj=self.cf.ae_adapter_embed, + num_heads=self.cf.ae_adapter_num_heads, + with_residual=self.cf.ae_adapter_with_residual, + with_qk_lnorm=self.cf.ae_adapter_with_qk_lnorm, + dropout_rate=self.cf.ae_adapter_dropout_rate, + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + + def forward(self, tokens_c, tokens_global_c, q_cells_lens_c, cell_lens_c, use_reentrant): + for block in self.ae_adapter: + tokens_global_c = checkpoint( + block, + tokens_global_c, + tokens_c, + q_cells_lens_c, + cell_lens_c, + use_reentrant=use_reentrant, + ) + return tokens_global_c + + +class GlobalAssimilationEngine(torch.nn.Module): + name: "GlobalAssimilationEngine" + + def __init__(self, cf: Config, num_healpix_cells: int) -> None: + """ + Initialize the GlobalAssimilationEngine with the configuration. + + :param cf: Configuration object containing parameters for the engine. + :param num_healpix_cells: Number of healpix cells used for local queries. 
+ """ + super(GlobalAssimilationEngine, self).__init__() + self.cf = cf + self.num_healpix_cells = num_healpix_cells + + self.ae_global_blocks = torch.nn.ModuleList() + + global_rate = int(1 / self.cf.ae_global_att_dense_rate) + for i in range(self.cf.ae_global_num_blocks): + ## Alternate between local and global attention + # as controlled by cf.ae_global_att_dense_rate + # Last block is always global attention + if i % global_rate == 0 or i + 1 == self.cf.ae_global_num_blocks: + self.ae_global_blocks.append( + MultiSelfAttentionHead( + self.cf.ae_global_dim_embed, + num_heads=self.cf.ae_global_num_heads, + dropout_rate=self.cf.ae_global_dropout_rate, + with_qk_lnorm=self.cf.ae_global_with_qk_lnorm, + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + else: + self.ae_global_blocks.append( + MultiSelfAttentionHeadLocal( + self.cf.ae_global_dim_embed, + num_heads=self.cf.ae_global_num_heads, + qkv_len=self.num_healpix_cells * self.cf.ae_local_num_queries, + block_factor=self.cf.ae_global_block_factor, + dropout_rate=self.cf.ae_global_dropout_rate, + with_qk_lnorm=self.cf.ae_global_with_qk_lnorm, + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + # MLP block + self.ae_global_blocks.append( + MLP( + self.cf.ae_global_dim_embed, + self.cf.ae_global_dim_embed, + with_residual=True, + dropout_rate=self.cf.ae_global_dropout_rate, + hidden_factor=self.cf.ae_global_mlp_hidden_factor, + norm_type=self.cf.norm_type, + norm_eps=self.cf.mlp_norm_eps, + ) + ) + + def forward(self, tokens, use_reentrant): + for block in self.ae_global_blocks: + tokens = checkpoint(block, tokens, use_reentrant=use_reentrant) + return tokens + + +class ForecastingEngine(torch.nn.Module): + name: "ForecastingEngine" + + def __init__(self, cf: Config, num_healpix_cells: int) -> None: + """ + Initialize the ForecastingEngine with the configuration. + + :param cf: Configuration object containing parameters for the engine. + :param num_healpix_cells: Number of healpix cells used for local queries. 
+ """ + super(ForecastingEngine, self).__init__() + self.cf = cf + self.num_healpix_cells = num_healpix_cells + self.fe_blocks = torch.nn.ModuleList() + + global_rate = int(1 / self.cf.forecast_att_dense_rate) + if self.cf.forecast_policy is not None: + for i in range(self.cf.fe_num_blocks): + # Alternate between global and local attention + if (i % global_rate == 0) or i + 1 == self.cf.ae_global_num_blocks: + self.fe_blocks.append( + MultiSelfAttentionHead( + self.cf.ae_global_dim_embed, + num_heads=self.cf.fe_num_heads, + dropout_rate=self.cf.fe_dropout_rate, + with_qk_lnorm=self.cf.fe_with_qk_lnorm, + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + dim_aux=1, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + else: + self.fe_blocks.append( + MultiSelfAttentionHeadLocal( + self.cf.ae_global_dim_embed, + num_heads=self.cf.fe_num_heads, + qkv_len=self.num_healpix_cells * self.cf.ae_local_num_queries, + block_factor=self.cf.ae_global_block_factor, + dropout_rate=self.cf.fe_dropout_rate, + with_qk_lnorm=self.cf.fe_with_qk_lnorm, + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + dim_aux=1, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + # Add MLP block + self.fe_blocks.append( + MLP( + self.cf.ae_global_dim_embed, + self.cf.ae_global_dim_embed, + with_residual=True, + dropout_rate=self.cf.fe_dropout_rate, + norm_type=self.cf.norm_type, + dim_aux=1, + norm_eps=self.cf.mlp_norm_eps, + ) + ) + + def init_weights_final(m): + if isinstance(m, torch.nn.Linear): + torch.nn.init.normal_(m.weight, mean=0, std=0.001) + if m.bias is not None: + torch.nn.init.normal_(m.bias, mean=0, std=0.001) + + for block in self.fe_blocks: + block.apply(init_weights_final) + + def forward(self, tokens, fstep): + aux_info = torch.tensor([fstep], dtype=torch.float32, device="cuda") + for block in self.fe_blocks: + tokens = checkpoint(block, tokens, aux_info, use_reentrant=False) + + return tokens + + +class EnsPredictionHead(torch.nn.Module): + def __init__( + self, + dim_embed, + dim_out, + ens_num_layers, + ens_size, + stream_name: str, + norm_type="LayerNorm", + hidden_factor=2, + final_activation: None | str = None, + ): + """Constructor""" + + super(EnsPredictionHead, self).__init__() + + self.name = f"EnsPredictionHead_{stream_name}" + + dim_internal = dim_embed * hidden_factor + # norm = torch.nn.LayerNorm if norm_type == "LayerNorm" else RMSNorm + enl = ens_num_layers + + self.pred_heads = torch.nn.ModuleList() + for i in range(ens_size): + self.pred_heads.append(torch.nn.ModuleList()) + + # self.pred_heads[-1].append( norm( dim_embed)) + self.pred_heads[-1].append( + torch.nn.Linear(dim_embed, dim_out if enl == 1 else dim_internal) + ) + + for i in range(ens_num_layers - 1): + self.pred_heads[-1].append(torch.nn.GELU()) + self.pred_heads[-1].append( + torch.nn.Linear(dim_internal, dim_out if enl - 2 == i else dim_internal) + ) + + # Add optional final non-linear activation + if final_activation is not None and enl >= 1: + fal = ActivationFactory.get(final_activation) + self.pred_heads[-1].append(fal) + + ######################################### + def forward(self, toks): + preds = [] + for pred_head in self.pred_heads: + cpred = toks + for block in pred_head: + cpred = block(cpred) + preds.append(cpred) + preds = torch.stack(preds, 0) + + return preds + + +class TargetPredictionEngineClassic(nn.Module): + def __init__( + self, + cf, + dims_embed, + dim_coord_in, + 
tr_dim_head_proj, + tr_mlp_hidden_factor, + softcap, + tro_type, + stream_name: str, + ): + """ + Initialize the TargetPredictionEngine with the configuration. + + :param cf: Configuration object containing parameters for the engine. + :param dims_embed: List of embedding dimensions for each layer. + :param dim_coord_in: Input dimension for coordinates. + :param tr_dim_head_proj: Dimension for head projection. + :param tr_mlp_hidden_factor: Hidden factor for the MLP layers. + :param softcap: Softcap value for the attention layers. + :param tro_type: Type of target readout (e.g., "obs_value"). + """ + super(TargetPredictionEngineClassic, self).__init__() + self.name = f"TargetPredictionEngine_{stream_name}" + + self.cf = cf + self.dims_embed = dims_embed + self.dim_coord_in = dim_coord_in + self.tr_dim_head_proj = tr_dim_head_proj + self.tr_mlp_hidden_factor = tr_mlp_hidden_factor + self.softcap = softcap + self.tro_type = tro_type + self.tte = torch.nn.ModuleList() + + for i in range(len(self.dims_embed) - 1): + # Multi-Cross Attention Head + self.tte.append( + MultiCrossAttentionHeadVarlen( + dim_embed_q=self.dims_embed[i], + dim_embed_kv=self.cf.ae_global_dim_embed, + num_heads=self.cf.streams[0]["target_readout"]["num_heads"], + dim_head_proj=self.tr_dim_head_proj, + with_residual=True, + with_qk_lnorm=True, + dropout_rate=0.1, # Assuming dropout_rate is 0.1 + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + softcap=self.softcap, + dim_aux=self.dim_coord_in, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + + # Optional Self-Attention Head + if self.cf.pred_self_attention: + self.tte.append( + MultiSelfAttentionHeadVarlen( + dim_embed=self.dims_embed[i], + num_heads=self.cf.streams[0]["target_readout"]["num_heads"], + dropout_rate=0.1, # Assuming dropout_rate is 0.1 + with_qk_lnorm=True, + with_flash=self.cf.with_flash_attention, + norm_type=self.cf.norm_type, + dim_aux=self.dim_coord_in, + norm_eps=self.cf.norm_eps, + attention_dtype=get_dtype(self.cf.attention_dtype), + ) + ) + + # MLP Block + self.tte.append( + MLP( + self.dims_embed[i], + self.dims_embed[i + 1], + with_residual=(self.cf.pred_dyadic_dims or self.tro_type == "obs_value"), + hidden_factor=self.tr_mlp_hidden_factor, + dropout_rate=0.1, # Assuming dropout_rate is 0.1 + norm_type=self.cf.norm_type, + dim_aux=(self.dim_coord_in if self.cf.pred_mlp_adaln else None), + norm_eps=self.cf.mlp_norm_eps, + ) + ) + + def forward(self, latent, output, latent_lens, output_lens, coordinates): + tc_tokens = output + tcs_lens = output_lens + tokens_stream = latent + tokens_lens = latent_lens + tcs_aux = coordinates + + for ib, block in enumerate(self.tte): + if self.cf.pred_self_attention and ib % 3 == 1: + tc_tokens = checkpoint(block, tc_tokens, tcs_lens, tcs_aux, use_reentrant=False) + else: + tc_tokens = checkpoint( + block, + tc_tokens, + tokens_stream, + tcs_lens, + tokens_lens, + tcs_aux, + use_reentrant=False, + ) + return tc_tokens + + +class TargetPredictionEngine(nn.Module): + def __init__( + self, + cf, + dims_embed, + dim_coord_in, + tr_dim_head_proj, + tr_mlp_hidden_factor, + softcap, + tro_type, + stream_name: str, + ): + """ + Initialize the TargetPredictionEngine with the configuration. + + :param cf: Configuration object containing parameters for the engine. + :param dims_embed: List of embedding dimensions for each layer. + :param dim_coord_in: Input dimension for coordinates. + :param tr_dim_head_proj: Dimension for head projection. 
+        :param tr_mlp_hidden_factor: Hidden factor for the MLP layers.
+        :param softcap: Softcap value for the attention layers.
+        :param tro_type: Type of target readout (e.g., "obs_value").
+
+        decoder_type decides how the conditioning is done:
+
+        PerceiverIO: a single cross-attention layer with no MLP or adaptive layer norm
+        AdaLayerNormConditioning: conditions only via the adaptive layer norm
+        CrossAttentionConditioning: conditions via the cross-attention layer and also uses an MLP
+        CrossAttentionAdaNormConditioning: conditions via the cross-attention layer and the
+        adaptive layer norm
+        PerceiverIOCoordConditioning: conditions on the coordinates via a modified adaptive
+        layer norm that does not rescale after the layer is applied
+        """
+        super(TargetPredictionEngine, self).__init__()
+        self.name = f"TargetPredictionEngine_{stream_name}"
+
+        self.cf = cf
+        self.dims_embed = dims_embed
+        self.dim_coord_in = dim_coord_in
+        self.tr_dim_head_proj = tr_dim_head_proj
+        self.tr_mlp_hidden_factor = tr_mlp_hidden_factor
+        self.softcap = softcap
+        self.tro_type = tro_type
+
+        # For backwards compatibility
+        from omegaconf import OmegaConf
+
+        self.cf = OmegaConf.merge(
+            OmegaConf.create({"decoder_type": "PerceiverIOCoordConditioning"}), self.cf
+        )
+
+        attention_kwargs = {
+            "with_qk_lnorm": True,
+            "dropout_rate": 0.1,  # Assuming dropout_rate is 0.1
+            "with_flash": self.cf.with_flash_attention,
+            "norm_type": self.cf.norm_type,
+            "softcap": self.softcap,
+            "dim_aux": self.dim_coord_in,
+            "norm_eps": self.cf.norm_eps,
+            "attention_dtype": get_dtype(self.cf.attention_dtype),
+        }
+        self.tte = nn.ModuleList()
+        self.output_in_norm = nn.LayerNorm(self.dims_embed[0])
+        self.latent_in_norm = nn.LayerNorm(self.cf.ae_global_dim_embed)
+        self.final_norm = nn.Identity()  # nn.RMSNorm(self.dims_embed[-1])
+        self.dropout = nn.Dropout(0.2)
+        self.pos_embed = nn.Parameter(torch.zeros(1, 9, self.cf.ae_global_dim_embed))
+        dim_aux = self.cf.ae_global_dim_embed
+
+        for ith, dim in enumerate(self.dims_embed[:-1]):
+            if self.cf.decoder_type == "PerceiverIO":
+                # a single cross attention layer as per https://arxiv.org/pdf/2107.14795
+                self.tte.append(
+                    CrossAttentionBlock(
+                        dim_q=dim,
+                        dim_kv=dim_aux,
+                        dim_aux=dim_aux,
+                        num_heads=self.cf.streams[0]["target_readout"]["num_heads"],
+                        with_self_attn=False,
+                        with_adanorm=False,
+                        with_mlp=False,
+                        dropout_rate=0.1,
+                        attention_kwargs=attention_kwargs,
+                    )
+                )
+            elif self.cf.decoder_type == "AdaLayerNormConditioning":
+                self.tte.append(
+                    SelfAttentionBlock(
+                        dim=dim,
+                        dim_aux=dim_aux,
+                        num_heads=self.cf.streams[0]["target_readout"]["num_heads"],
+                        attention_kwargs=attention_kwargs,
+                        with_adanorm=True,
+                        dropout_rate=0.1,
+                    )
+                )
+            elif self.cf.decoder_type == "CrossAttentionConditioning":
+                self.tte.append(
+                    CrossAttentionBlock(
+                        dim_q=dim,
+                        dim_kv=self.cf.ae_global_dim_embed,
+                        dim_aux=dim_aux,
+                        num_heads=self.cf.streams[0]["target_readout"]["num_heads"],
+                        with_self_attn=True,
+                        with_adanorm=False,
+                        with_mlp=True,
+                        dropout_rate=0.1,
+                        attention_kwargs=attention_kwargs,
+                    )
+                )
+            elif self.cf.decoder_type == "CrossAttentionAdaNormConditioning":
+                self.tte.append(
+                    CrossAttentionBlock(
+                        dim_q=dim,
+                        dim_kv=dim_aux,
+                        dim_aux=dim_aux,
+                        num_heads=self.cf.streams[0]["target_readout"]["num_heads"],
+                        with_self_attn=True,
+                        with_adanorm=True,
+                        with_mlp=True,
+                        dropout_rate=0.1,
+                        attention_kwargs=attention_kwargs,
+                    )
+                )
+            elif self.cf.decoder_type == "PerceiverIOCoordConditioning":
+                self.tte.append(
+                    OriginalPredictionBlock(
+                        config=self.cf,
+                        dim_in=dim,
dim_out=self.dims_embed[ith + 1], + dim_kv=dim_aux, + dim_aux=self.dim_coord_in, + num_heads=self.cf.streams[0]["target_readout"]["num_heads"], + attention_kwargs=attention_kwargs, + tr_dim_head_proj=tr_dim_head_proj, + tr_mlp_hidden_factor=tr_mlp_hidden_factor, + tro_type=tro_type, + mlp_norm_eps=self.cf.mlp_norm_eps, + ) + ) + else: + raise NotImplementedError( + f"{self.cf.decoder_type} is not implemented for prediction heads" + ) + + def forward(self, latent, output, latent_lens, output_lens, coordinates): + latent = ( + self.dropout(self.latent_in_norm(latent + self.pos_embed)) + if self.cf.decoder_type != "PerceiverIOCoordConditioning" + else latent + ) + for layer in self.tte: + if isinstance(layer, OriginalPredictionBlock): + output = checkpoint( + layer, + latent=latent.flatten(0, 1), + output=output, + coords=coordinates, + latent_lens=latent_lens, + output_lens=output_lens, + use_reentrant=False, + ) + elif isinstance(layer, CrossAttentionBlock): + output = checkpoint( + layer, + x=output, + x_kv=latent.flatten(0, 1), + x_lens=output_lens, + aux=latent[:, 0], + x_kv_lens=latent_lens, + use_reentrant=False, + ) + else: + output = checkpoint( + layer, + x=output, + x_lens=output_lens, + aux=latent[:, 0], + use_reentrant=False, + ) + output = ( + self.final_norm(output) + if self.cf.decoder_type != "PerceiverIOCoordConditioning" + else output + ) + return output diff --git a/src/weathergen/model/ens_prediction_head.py b/src/weathergen/model/ens_prediction_head.py deleted file mode 100644 index e205c884a..000000000 --- a/src/weathergen/model/ens_prediction_head.py +++ /dev/null @@ -1,55 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
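The new EnsPredictionHead in engines.py, whose deleted predecessor is listed below, realises the probabilistic output simply as ens_size independent MLP heads applied to the same decoder tokens and stacked along a new leading ensemble dimension. Schematically, with toy sizes that are purely illustrative:

    import torch
    import torch.nn as nn

    dim_embed, dim_out, ens_size = 8, 3, 4  # assumed toy sizes
    heads = nn.ModuleList(nn.Linear(dim_embed, dim_out) for _ in range(ens_size))

    toks = torch.randn(10, dim_embed)                 # tokens from the decoder
    preds = torch.stack([h(toks) for h in heads], 0)  # (ens_size, 10, dim_out)

Each head sees the same input, so the ensemble spread comes entirely from the independently initialised (and trained) head weights.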
- -import code -import torch - -from weathergen.model.norms import RMSNorm - -class EnsPredictionHead( torch.nn.Module) : - - ######################################### - def __init__(self, dim_embed, dim_out, - ens_num_layers, ens_size, norm_type = 'LayerNorm', - hidden_factor = 2) : - '''Constructor''' - - super( EnsPredictionHead, self).__init__() - - dim_internal = dim_embed * hidden_factor - norm = torch.nn.LayerNorm if norm_type == 'LayerNorm' else RMSNorm - enl = ens_num_layers - - self.pred_heads = torch.nn.ModuleList() - for i in range( ens_size) : - - self.pred_heads.append( torch.nn.ModuleList()) - - # self.pred_heads[-1].append( norm( dim_embed)) - self.pred_heads[-1].append( torch.nn.Linear( dim_embed, dim_out if 1==enl else dim_internal)) - - for i in range( ens_num_layers-1) : - self.pred_heads[-1].append( torch.nn.GELU()) - self.pred_heads[-1].append( torch.nn.Linear( dim_internal, dim_out if enl-2==i else dim_internal)) - - ######################################### - @torch.amp.custom_fwd(cast_inputs=torch.float32, device_type='cuda') - def forward( self, toks) : - - preds = [ ] - for pred_head in self.pred_heads : - cpred = toks - for block in pred_head : - cpred = block( cpred) - preds.append( cpred) - preds = torch.stack( preds, 0) - - return preds - - diff --git a/src/weathergen/model/layers.py b/src/weathergen/model/layers.py new file mode 100644 index 000000000..1f7b8df5d --- /dev/null +++ b/src/weathergen/model/layers.py @@ -0,0 +1,95 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
+ + +import torch +import torch.nn as nn + +from weathergen.model.norms import AdaLayerNorm, RMSNorm + + +class NamedLinear(torch.nn.Module): + def __init__(self, name: str | None = None, **kwargs): + super(NamedLinear, self).__init__() + self.linear = nn.Linear(**kwargs) + if name is not None: + self.name = name + + def reset_parameters(self): + self.linear.reset_parameters() + + def forward(self, x): + return self.linear(x) + + +class MLP(torch.nn.Module): + def __init__( + self, + dim_in, + dim_out, + num_layers=2, + hidden_factor=2, + pre_layer_norm=True, + dropout_rate=0.0, + nonlin=torch.nn.GELU, + with_residual=False, + norm_type="LayerNorm", + dim_aux=None, + norm_eps=1e-5, + name: str | None = None, + ): + """Constructor""" + + super(MLP, self).__init__() + + if name is not None: + self.name = name + + assert num_layers >= 2 + + self.with_residual = with_residual + self.with_aux = dim_aux is not None + dim_hidden = int(dim_in * hidden_factor) + + self.layers = torch.nn.ModuleList() + + norm = torch.nn.LayerNorm if norm_type == "LayerNorm" else RMSNorm + + if pre_layer_norm: + self.layers.append( + norm(dim_in, eps=norm_eps) + if dim_aux is None + else AdaLayerNorm(dim_in, dim_aux, norm_eps=norm_eps) + ) + + self.layers.append(torch.nn.Linear(dim_in, dim_hidden)) + self.layers.append(nonlin()) + self.layers.append(torch.nn.Dropout(p=dropout_rate)) + + for _ in range(num_layers - 2): + self.layers.append(torch.nn.Linear(dim_hidden, dim_hidden)) + self.layers.append(nonlin()) + self.layers.append(torch.nn.Dropout(p=dropout_rate)) + + self.layers.append(torch.nn.Linear(dim_hidden, dim_out)) + + def forward(self, *args): + x, x_in, aux = args[0], args[0], args[-1] + + for i, layer in enumerate(self.layers): + x = layer(x, aux) if (i == 0 and self.with_aux) else layer(x) + + if self.with_residual: + if x.shape[-1] == x_in.shape[-1]: + x = x_in + x + else: + assert x.shape[-1] % x_in.shape[-1] == 0 + x = x + x_in.repeat([*[1 for _ in x.shape[:-1]], x.shape[-1] // x_in.shape[-1]]) + + return x diff --git a/src/weathergen/model/mlp.py b/src/weathergen/model/mlp.py deleted file mode 100644 index be500b996..000000000 --- a/src/weathergen/model/mlp.py +++ /dev/null @@ -1,64 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
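One subtlety of the rewritten MLP above, carried over from the deleted mlp.py that follows: when with_residual is set and the output width is an integer multiple of the input width, the input is tiled along the channel dimension before being added. A standalone illustration of that broadcast:

    import torch

    x_in = torch.randn(2, 4)  # block input, dim_in = 4
    x = torch.randn(2, 8)     # MLP output, dim_out = 8 = 2 * dim_in
    assert x.shape[-1] % x_in.shape[-1] == 0
    x = x + x_in.repeat([*[1 for _ in x.shape[:-1]], x.shape[-1] // x_in.shape[-1]])
    print(x.shape)  # torch.Size([2, 8])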
- -import torch -from weathergen.model.norms import RMSNorm -from weathergen.model.norms import AdaLayerNorm - -class MLP( torch.nn.Module) : - - def __init__(self, dim_in, dim_out, num_layers = 2, hidden_factor = 2, - pre_layer_norm = True, dropout_rate = 0., nonlin = torch.nn.GELU, - with_residual = False, norm_type = 'LayerNorm', dim_aux=None) : - '''Constructor''' - - super( MLP, self).__init__() - - assert num_layers >= 2 - - self.with_residual = with_residual - self.with_aux = dim_aux is not None - dim_hidden = int( dim_in * hidden_factor) - - self.layers = torch.nn.ModuleList() - - norm = torch.nn.LayerNorm if norm_type=='LayerNorm' else RMSNorm - - if pre_layer_norm : - self.layers.append( norm( dim_in) if dim_aux is None else AdaLayerNorm( dim_in, dim_aux)) - - self.layers.append( torch.nn.Linear( dim_in, dim_hidden)) - self.layers.append( nonlin()) - self.layers.append( torch.nn.Dropout( p = dropout_rate)) - - for il in range(num_layers-2) : - self.layers.append( torch.nn.Linear( dim_hidden, dim_hidden)) - self.layers.append( nonlin()) - self.layers.append( torch.nn.Dropout( p = dropout_rate)) - - self.layers.append( torch.nn.Linear( dim_hidden, dim_out)) - self.layers.append( nonlin()) - - def forward( self, *args) : - - x, x_in, aux = args[0], args[0], args[-1] - - for i,layer in enumerate(self.layers) : - x = layer( x, aux) if (i==0 and self.with_aux) else layer( x) - - if self.with_residual : - if x.shape[-1] == x_in.shape[-1] : - x = x_in + x - else : - assert x.shape[-1] % x_in.shape[-1] == 0 - x = x + x_in.repeat([ *[1 for _ in x.shape[:-1]], x.shape[-1] // x_in.shape[-1] ]) - - return x - - diff --git a/src/weathergen/model/model.py b/src/weathergen/model/model.py index 3aa57c3ea..875498cfd 100644 --- a/src/weathergen/model/model.py +++ b/src/weathergen/model/model.py @@ -1,4 +1,6 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# ruff: noqa: T201 +# (C) Copyright 2025 WeatherGenerator contributors. + # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -7,651 +9,855 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. 
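The old ModelParams deleted below builds its positional-encoding tables inline, with interleaved sin/cos entries at geometrically spaced frequencies. For reference, the same construction factored into a helper; this is a generic sketch rather than code from either version, it mirrors the deleted code's unusual choice of math.log(seq_len) as the frequency base and assumes an even embedding dimension:

    import math

    import torch

    def sinusoidal_pe(seq_len: int, dim: int) -> torch.Tensor:
        # interleaved sin/cos positional encoding over geometrically spaced frequencies
        position = torch.arange(seq_len).unsqueeze(1)
        div = torch.exp(torch.arange(0, dim, 2) * -(math.log(seq_len) / dim))
        pe = torch.zeros(seq_len, dim)
        pe[:, 0::2] = torch.sin(position * div)
        pe[:, 1::2] = torch.cos(position * div)
        return pe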
-import os -import numpy as np +import logging import math -import time -import code import warnings -import torch -import astropy_healpix.healpy - -from torch.nn.attention.flex_attention import flex_attention, create_mask, create_block_mask import astropy_healpix as hp - +import astropy_healpix.healpy +import numpy as np +import torch +import torch.nn as nn +from astropy_healpix import healpy from torch.utils.checkpoint import checkpoint -from weathergen.model.stream_embed_transformer import StreamEmbedTransformer -from weathergen.model.stream_embed_linear import StreamEmbedLinear -from weathergen.model.ens_prediction_head import EnsPredictionHead - -from weathergen.model.attention import ( MultiSelfAttentionHead, - MultiSelfAttentionHead_Local, - MultiCrossAttentionHead, - MultiSelfAttentionHead_Varlen, - MultiCrossAttentionHead_Varlen, - MultiCrossAttentionHead_Varlen_SlicedQ) -from weathergen.model.mlp import MLP - -from weathergen.model.utils import get_num_parameters, freeze_weights - -from weathergen.model.positional_encoding import positional_encoding_harmonic -from weathergen.model.positional_encoding import positional_encoding_harmonic_idx -from weathergen.model.positional_encoding import positional_encoding_harmonic_global - -from weathergen.utils.logger import logger - - -class ModelParams( torch.nn.Module) : - - def __init__( self) : - - super( ModelParams, self).__init__() - - def create( self, cf) : - - self.healpix_level = cf.healpix_level - self.num_healpix_cells = 12 * 4**cf.healpix_level - - # positional encodings - - dim_embed = cf.ae_local_dim_embed - len_token_seq = 1024 - position = torch.arange( 0, len_token_seq).unsqueeze(1) - div = torch.exp(torch.arange( 0, dim_embed, 2) * -(math.log(len_token_seq) / dim_embed)) - pe_embed = torch.zeros( len_token_seq, dim_embed, dtype=torch.float16) - pe_embed[:, 0::2] = torch.sin( position * div[ : pe_embed[:, 0::2].shape[1] ]) - pe_embed[:, 1::2] = torch.cos( position * div[ : pe_embed[:, 1::2].shape[1] ]) - self.pe_embed = torch.nn.Parameter( pe_embed, requires_grad=False) - - dim_embed = 1024 - len_token_seq = 8192*4 #900000 - # print( f'len_token_seq = {len_token_seq}') - position = torch.arange( 0, len_token_seq).unsqueeze(1) - div = torch.exp(torch.arange( 0, dim_embed, 2) * -(math.log(len_token_seq) / dim_embed)) - pe_tc_tokens = torch.zeros( len_token_seq, dim_embed, dtype=torch.float16) - pe_tc_tokens[:, 0::2] = torch.sin( position * div[ : pe_tc_tokens[:, 0::2].shape[1] ]) - pe_tc_tokens[:, 1::2] = torch.cos( position * div[ : pe_tc_tokens[:, 1::2].shape[1] ]) - self.pe_tc_tokens = torch.nn.Parameter( pe_tc_tokens, requires_grad=False) - - dim_embed = cf.ae_global_dim_embed - pe = torch.zeros( self.num_healpix_cells, cf.ae_local_num_queries, dim_embed, dtype=torch.float16) - xs = 2. 
* np.pi * torch.arange( 0, dim_embed, 2) / dim_embed - pe[ ..., 0::2] = 0.5 * torch.sin( torch.outer( 8 * torch.arange( cf.ae_local_num_queries), xs) ) - pe[ ..., 0::2] += torch.sin( torch.outer( torch.arange( self.num_healpix_cells), xs) ).unsqueeze(1).repeat( (1,cf.ae_local_num_queries,1)) - pe[ ..., 1::2] = 0.5 * torch.cos( torch.outer( 8 * torch.arange( cf.ae_local_num_queries), xs) ) - pe[ ..., 1::2] += torch.cos( torch.outer( torch.arange( self.num_healpix_cells), xs) ).unsqueeze(1).repeat( (1,cf.ae_local_num_queries,1)) - self.pe_global = torch.nn.Parameter( pe, requires_grad=False) - - # healpix neighborhood structure - - hlc = self.healpix_level - num_healpix_cells = self.num_healpix_cells - with warnings.catch_warnings(action="ignore"): - temp = hp.neighbours( np.arange(num_healpix_cells), 2**hlc, order='nested').transpose() - # fix missing nbors with references to self - for i, row in enumerate(temp) : - temp[i][row == -1] = i - # nbors *and* self - nbours = torch.empty( (temp.shape[0], (temp.shape[1]+1) ), dtype=torch.int32) - nbours[:,0] = torch.arange( temp.shape[0]) - nbours[:,1:] = torch.from_numpy(temp) - self.hp_nbours = torch.nn.Parameter( nbours, requires_grad=False) - - # varlen index set for tokens - assert cf.batch_size == cf.batch_size_validation - bs = cf.batch_size - nqs = 9 - s = [bs, self.num_healpix_cells, cf.ae_local_num_queries, cf.ae_global_dim_embed] - pad = torch.zeros( 1, dtype=torch.int32) - if cf.target_cell_local_prediction : - tokens_lens = torch.cat([pad, nqs*s[2]*torch.ones(bs*s[1], dtype=torch.int32)]) - else : - tokens_lens = torch.cat([pad, nqs*s[1]*s[2]*torch.ones(bs, dtype=torch.int32)]) - self.tokens_lens = torch.nn.Parameter( tokens_lens, requires_grad=False) - - # precompute for varlen attention - s = (self.num_healpix_cells, cf.ae_local_num_queries, cf.ae_global_dim_embed) - # q_cells_lens = s[1] * torch.ones( s[0], dtype=torch.int32) - q_cells_lens = torch.ones( s[0], dtype=torch.int32) - q_cells_lens = torch.cat( [torch.zeros( 1, dtype=torch.int32), q_cells_lens]) - self.q_cells_lens = torch.nn.Parameter( q_cells_lens, requires_grad=False) - - return self - -#################################################################################################### -class Model( torch.nn.Module) : - - ######################################### - def __init__(self, cf, num_channels, geoinfo_sizes) : - '''Constructor''' - - super( Model, self).__init__() - - self.healpix_level = cf.healpix_level - self.num_healpix_cells = 12 * 4**self.healpix_level - - self.cf = cf - self.num_channels = num_channels - self.geoinfo_sizes = geoinfo_sizes - - ######################################### - def create( self) : - - cf = self.cf - - # separate embedding networks for differnt observation types - self.embeds = torch.nn.ModuleList() - for i, si in enumerate( cf.streams) : - if 'diagnostic' in si : - if si['diagnostic'] : - self.embeds.append( torch.nn.Identity()) - continue - if si['embed']['net'] == 'transformer' : - self.embeds.append( StreamEmbedTransformer( mode=cf.embed_orientation, - num_tokens=si['embed']['num_tokens'], - token_size=si['token_size'], - num_channels=self.num_channels[i][0], - dim_embed=si['embed']['dim_embed'], - dim_out=cf.ae_local_dim_embed, - num_blocks=si['embed']['num_blocks'], - num_heads=si['embed']['num_heads'], - norm_type=cf.norm_type, - embed_size_centroids = cf.embed_size_centroids, - unembed_mode = cf.embed_unembed_mode )) - elif si['embed']['net'] == 'linear' : - self.embeds.append( StreamEmbedLinear( 
self.num_channels[i][0]*si['token_size'], - cf.ae_local_dim_embed) ) - else : - assert False, 'Unsupported embedding network type' - - # local assimilation engine - self.ae_local_blocks = torch.nn.ModuleList() - for i in range( cf.ae_local_num_blocks) : - self.ae_local_blocks.append( MultiSelfAttentionHead_Varlen( cf.ae_local_dim_embed, - num_heads=cf.ae_local_num_heads, - dropout_rate=cf.ae_local_dropout_rate, - with_qk_lnorm=cf.ae_local_with_qk_lnorm, - with_flash=cf.with_flash_attention, - norm_type=cf.norm_type)) - self.ae_local_blocks.append( MLP( cf.ae_local_dim_embed, cf.ae_local_dim_embed, - with_residual=True, dropout_rate=cf.ae_local_dropout_rate, - norm_type=cf.norm_type )) - - ############## - # local -> global assimilation engine adapter - self.ae_adapter = torch.nn.ModuleList() - self.ae_adapter.append( MultiCrossAttentionHead_Varlen_SlicedQ( cf.ae_global_dim_embed, cf.ae_local_dim_embed, - num_slices_q=cf.ae_local_num_queries, - dim_head_proj=cf.ae_adapter_embed, - num_heads=cf.ae_adapter_num_heads, - with_residual=cf.ae_adapter_with_residual, - with_qk_lnorm=cf.ae_adapter_with_qk_lnorm, - dropout_rate=cf.ae_adapter_dropout_rate, - with_flash=cf.with_flash_attention, - norm_type=cf.norm_type)) - self.ae_adapter.append( MLP( cf.ae_global_dim_embed, cf.ae_global_dim_embed, - with_residual=True, dropout_rate=cf.ae_adapter_dropout_rate, - norm_type=cf.norm_type )) - self.ae_adapter.append( MultiCrossAttentionHead_Varlen_SlicedQ( cf.ae_global_dim_embed, cf.ae_local_dim_embed, - num_slices_q=cf.ae_local_num_queries, - dim_head_proj=cf.ae_adapter_embed, - num_heads=cf.ae_adapter_num_heads, - with_residual=cf.ae_adapter_with_residual, - with_qk_lnorm=cf.ae_adapter_with_qk_lnorm, - dropout_rate=cf.ae_adapter_dropout_rate, - with_flash=cf.with_flash_attention, - norm_type=cf.norm_type)) - - # learnable queries - if cf.ae_local_queries_per_cell : - s = (self.num_healpix_cells, cf.ae_local_num_queries, cf.ae_global_dim_embed) - q_cells = torch.rand( s, requires_grad=True) / cf.ae_global_dim_embed - # add meta data - q_cells[:,:,-8:-6] = (torch.arange( self.num_healpix_cells) / self.num_healpix_cells).unsqueeze(1).unsqueeze(1).repeat( (1,cf.ae_local_num_queries,2)) - theta, phi = healpy.pix2ang( nside=2**self.healpix_level, ipix=torch.arange( self.num_healpix_cells) ) - q_cells[:,:,-6:-3] = torch.cos(theta).unsqueeze(1).unsqueeze(1).repeat( (1,cf.ae_local_num_queries,3)) - q_cells[:,:,-3:] = torch.sin(phi).unsqueeze(1).unsqueeze(1).repeat( (1,cf.ae_local_num_queries,3)) - q_cells[:,:,-9] = torch.arange( cf.ae_local_num_queries) - q_cells[:,:,-10] = torch.arange( cf.ae_local_num_queries) - else : - s = (1, cf.ae_local_num_queries, cf.ae_global_dim_embed) - q_cells = torch.rand( s, requires_grad=True) / cf.ae_global_dim_embed - self.q_cells = torch.nn.Parameter( q_cells, requires_grad=True) - - ############## - # global assimilation engine - global_rate = int( 1 / cf.ae_global_att_dense_rate) - self.ae_global_blocks = torch.nn.ModuleList() - for i in range( cf.ae_global_num_blocks) : - # alternate between local and global attention as controlled by cf.ae_global_att_dense_rate - # last block is always global attention - # if (i % global_rate == 0 and i>0) or i+1 == cf.ae_global_num_blocks : - if i % global_rate == 0 or i+1 == cf.ae_global_num_blocks : - self.ae_global_blocks.append( MultiSelfAttentionHead( cf.ae_global_dim_embed, - num_heads=cf.ae_global_num_heads, - dropout_rate=cf.ae_global_dropout_rate, - with_qk_lnorm=cf.ae_global_with_qk_lnorm, - with_flash=cf.with_flash_attention, - 
norm_type=cf.norm_type)) - else : - self.ae_global_blocks.append( MultiSelfAttentionHead_Local( cf.ae_global_dim_embed, - num_heads=cf.ae_global_num_heads, - qkv_len=self.num_healpix_cells*cf.ae_local_num_queries, - block_factor=cf.ae_global_block_factor, - dropout_rate=cf.ae_global_dropout_rate, - with_qk_lnorm=cf.ae_global_with_qk_lnorm, - with_flash=cf.with_flash_attention, - norm_type=cf.norm_type)) - # MLP block - self.ae_global_blocks.append( MLP( cf.ae_global_dim_embed, cf.ae_global_dim_embed, - with_residual=True, dropout_rate=cf.ae_global_dropout_rate, - hidden_factor=cf.ae_global_mlp_hidden_factor, - norm_type=cf.norm_type)) - - ############### - # forecasting engine - - global_rate = int( 1 / cf.forecast_att_dense_rate) - self.fe_blocks = torch.nn.ModuleList() - if cf.forecast_policy is not None : - for i in range( cf.fe_num_blocks) : - if (i % global_rate == 0 and i>0) or i+1 == cf.ae_global_num_blocks : - self.fe_blocks.append( MultiSelfAttentionHead( cf.ae_global_dim_embed, - num_heads=cf.fe_num_heads, - dropout_rate=cf.fe_dropout_rate, - with_qk_lnorm=cf.fe_with_qk_lnorm, - with_flash=cf.with_flash_attention, - norm_type=cf.norm_type, dim_aux=1)) - else : - self.fe_blocks.append( MultiSelfAttentionHead_Local( cf.ae_global_dim_embed, - num_heads=cf.fe_num_heads, - qkv_len=self.num_healpix_cells*cf.ae_local_num_queries, - block_factor=cf.ae_global_block_factor, - dropout_rate=cf.fe_dropout_rate, - with_qk_lnorm=cf.fe_with_qk_lnorm, - with_flash=cf.with_flash_attention, - norm_type=cf.norm_type, dim_aux=1)) - self.fe_blocks.append( MLP( cf.ae_global_dim_embed, cf.ae_global_dim_embed, - with_residual=True, dropout_rate=cf.fe_dropout_rate, - norm_type=cf.norm_type, dim_aux=1)) - - ############### - - # embed coordinates yielding one query token for each target token - dropout_rate = 0.1 - self.embed_target_coords = torch.nn.ModuleList() - self.target_token_engines = torch.nn.ModuleList() - self.pred_adapter_kv = torch.nn.ModuleList() - self.pred_heads = torch.nn.ModuleList() - - for i_obs, si in enumerate( cf.streams) : - - # extract and setup relevant parameters - etc = si['embed_target_coords'] - tro_type = si['target_readout']['type'] if 'type' in si['target_readout'] else 'token' - dim_embed = si['embed_target_coords']['dim_embed'] - dim_out = max( dim_embed, si['token_size']*(self.num_channels[i_obs][0]-self.geoinfo_sizes[i_obs])) - tr = si['target_readout'] - num_layers = tr['num_layers'] - tr_mlp_hidden_factor = tr['mlp_hidden_factor'] if 'mlp_hidden_factor' in tr else 2 - tr_dim_head_proj = tr['dim_head_proj'] if 'dim_head_proj' in tr else None - softcap = tr['softcap'] if 'softcap' in tr else 0. 
- n_chs = self.num_channels[i_obs] - - if tro_type == 'obs_value' : - # fixed dimension for obs_value type - dims_embed = [si['embed_target_coords']['dim_embed'] for _ in range(num_layers+1)] - else : - if cf.pred_dyadic_dims : - coord_dim = self.geoinfo_sizes[i_obs]*si['token_size'] - dims_embed = torch.tensor([dim_out//2**i for i in range( num_layers-1, -1, -1)] + [dim_out]) - dims_embed[dims_embed < coord_dim] = dims_embed[ torch.where( dims_embed >= coord_dim)[0][0] ] - dims_embed = dims_embed.tolist() - else : - dims_embed = torch.linspace( dim_embed, dim_out, num_layers+1, dtype=torch.int32).tolist() - - logger.info( '{} :: coord embed: :: {}'.format( si['name'], dims_embed)) - - dim_coord_in = ((self.geoinfo_sizes[i_obs]-2)+(5*(3*5))+3*8) * (1 if tro_type == 'obs_value' else si['token_size']) - dim_pred = (n_chs[0]-self.geoinfo_sizes[i_obs]) * (1 if tro_type=='obs_value' else si['token_size']) - - # embedding network for coordinates - if etc['net'] == 'linear' : - self.embed_target_coords.append( torch.nn.Linear( dim_coord_in, dims_embed[0])) - elif etc['net'] == 'mlp' : - self.embed_target_coords.append( MLP( dim_coord_in, dims_embed[0], - hidden_factor = 8, with_residual=False, - dropout_rate=dropout_rate)) - else : - assert False - - # obs-specific adapter for tokens - if cf.pred_adapter_kv : - self.pred_adapter_kv.append( MLP( cf.ae_global_dim_embed, cf.ae_global_dim_embed, - hidden_factor = 2, with_residual=True, - dropout_rate=dropout_rate, norm_type=cf.norm_type)) - else : - self.pred_adapter_kv.append( torch.nn.Identity()) - - # target prediction engines - tte = torch.nn.ModuleList() - for i in range( num_layers) : - tte.append( MultiCrossAttentionHead_Varlen( dims_embed[i], cf.ae_global_dim_embed, - si['target_readout']['num_heads'], - dim_head_proj=tr_dim_head_proj, - with_residual=True, - with_qk_lnorm=True, - dropout_rate=dropout_rate, - with_flash=cf.with_flash_attention, - norm_type=cf.norm_type, - softcap=softcap, - dim_aux=dim_coord_in)) - if cf.pred_self_attention : - tte.append( MultiSelfAttentionHead_Varlen( dims_embed[i], - num_heads=si['target_readout']['num_heads'], - dropout_rate=dropout_rate, - with_qk_lnorm=True, - with_flash=cf.with_flash_attention, - norm_type=cf.norm_type, - dim_aux=dim_coord_in)) - tte.append( MLP( dims_embed[i], dims_embed[i+1], - with_residual=(True if cf.pred_dyadic_dims or tro_type=='obs_value' else False), - hidden_factor=tr_mlp_hidden_factor, - dropout_rate=dropout_rate, norm_type=cf.norm_type, - dim_aux = (dim_coord_in if cf.pred_mlp_adaln else None) )) - self.target_token_engines.append( tte) - - # ensemble prediction heads to provide probabilistic prediction - self.pred_heads.append( EnsPredictionHead( dims_embed[-1], dim_pred, - si['pred_head']['num_layers'], si['pred_head']['ens_size'], - norm_type=cf.norm_type)) - - return self - - ######################################### - def freeze_weights_forecast( self): - '''Freeze model weights''' - - # freeze everything - for p in self.parameters() : - p.requires_grad = False - self.q_cells.requires_grad = False - - # unfreeze forecast part - for p in self.fe_blocks.parameters() : - p.requires_grad = True - - return self - - ######################################### - def print_num_parameters( self) : - - cf = self.cf - num_params_embed = [get_num_parameters( embed) for embed in self.embeds] - num_params_total = get_num_parameters( self) - num_params_ae_local = get_num_parameters( self.ae_local_blocks) - num_params_ae_global = get_num_parameters( self.ae_global_blocks) - - 
num_params_q_cells = np.prod(self.q_cells.shape) if self.q_cells.requires_grad else 0 - num_params_ae_adapater = get_num_parameters( self.ae_adapter) - - num_params_fe = get_num_parameters( self.fe_blocks) - - num_params_pred_adapter = [get_num_parameters( kv) for kv in self.pred_adapter_kv] - num_params_embed_tcs = [get_num_parameters( etc) for etc in self.embed_target_coords] - num_params_tte = [get_num_parameters( tte) for tte in self.target_token_engines] - num_params_preds = [get_num_parameters(head) for head in self.pred_heads] - - print( '-----------------') - print( f'Total number of trainable parameters: {num_params_total:,}') - print( 'Number of parameters:') - print( ' Embedding networks:') - [print(' {} : {:,}'.format(si['name'],np)) for si,np in zip(cf.streams,num_params_embed)] - print( f' Local assimilation engine: {num_params_ae_local:,}') - print( f' Local-global adapter: {num_params_ae_adapater:,}') - print( f' Learnable queries: {num_params_q_cells:,}') - print( f' Global assimilation engine: {num_params_ae_global:,}') - print( f' Forecast engine: {num_params_fe:,}') - print( ' kv-adapter, coordinate embedding, prediction networks and prediction heads:') - zps=zip(cf.streams,num_params_pred_adapter,num_params_embed_tcs,num_params_tte,num_params_preds) - [print(' {} : {:,} / {:,} / {:,} / {:,}'.format(si['name'],np0,np1,np2,np3)) - for si,np0,np1,np2,np3 in zps] - print( '-----------------') - - ######################################### - def load( self, run_id, epoch = None) : - - path_run = './models/' + run_id + '/' - fname = path_run + f'{run_id}' - fname += '_epoch{:05d}.chkpt'.format( epoch) if epoch is not None else '_latest.chkpt' - - params = torch.load( fname, map_location=torch.device('cpu'), weights_only=True) - params_renamed = {} - for k in params.keys() : - params_renamed[k.replace( 'module.', '')] = params[k] - mkeys, ukeys = self.load_state_dict( params_renamed, strict=False) - # mkeys, ukeys = self.load_state_dict( params, strict=False) - - if len(mkeys) > 0 : - logger.warning( f'Missing keys when loading model: {mkeys}') - - if len(ukeys) > 0 : - logger.warning( f'Unused keys when loading model: {mkeys}') - - ######################################### - def forward_jac( self, *args) : - - sources = args[:-1] - sources_lens = args[-1] - # no-op when satisfied but needed for Jacobian - sources_lens = sources_lens.to(torch.int64).cpu() - - preds_all = self.forward( sources, sources_lens) - - return tuple(preds_all[0]) - - ######################################### - def forward( self, model_params, source_tokens_cells, source_tokens_lens, source_centroids, source_cell_lens, - source_idxs_embed, target_coords, target_coords_lens, target_coords_idxs, - num_time_steps) : - - batch_size = self.cf.batch_size if self.training else self.cf.batch_size_validation - assert len(source_tokens_cells) == batch_size - - # embed - tokens = self.embed_cells( model_params, source_tokens_cells, source_tokens_lens, source_centroids, source_idxs_embed) - - # local assimilation engine and adapter - tokens = self.assimilate_local( model_params, tokens, source_cell_lens) - - tokens = self.assimilate_global( model_params, tokens) - - # roll-out in latent space - preds_all = [] - for it in range( num_time_steps ) : - - # prediction - preds_all += [ self.predict( model_params, it, tokens, - target_coords, target_coords_lens, target_coords_idxs) ] - - tokens = self.forecast( model_params, tokens) - - # prediction for final step - preds_all += [ self.predict( model_params, 
num_time_steps, tokens, - target_coords, target_coords_lens, target_coords_idxs) ] - - return preds_all - - ######################################### - def embed_cells( self, model_params, source_tokens_cells, source_tokens_lens, source_centroids, source_idxs_embed) : - - cat = torch.cat - - offsets_base = source_tokens_lens.sum(1).sum(0).cumsum(0) - tokens_all = torch.empty( (int(offsets_base[-1]), self.cf.ae_local_dim_embed), - dtype=torch.float16, device='cuda') - - for ib, sb in enumerate(source_tokens_cells) : - for itype, (s,embed) in enumerate( zip(sb,self.embeds)) : - if s.shape[0]>0 : - - idxs = source_idxs_embed[0][ib][itype] - idxs_pe = source_idxs_embed[1][ib][itype] - # create full scatter index (there's no broadcasting which is likely highly inefficient) - idxs = idxs.repeat( (1,self.cf.ae_local_dim_embed)) - x_embed = embed( s, source_centroids[ib][itype]).flatten(0,1) - # x_embed = torch.cat( [embed( s_c, c_c).flatten(0,1) - # for s_c,c_c in zip( torch.split( s, 49152), - # torch.split( source_centroids[ib][itype], 49152))]) - tokens_all.scatter_( 0, idxs, x_embed + model_params.pe_embed[idxs_pe]) - return tokens_all +from weathergen.common.config import Config +from weathergen.model.engines import ( + EmbeddingEngine, + EnsPredictionHead, + ForecastingEngine, + GlobalAssimilationEngine, + Local2GlobalAssimilationEngine, + LocalAssimilationEngine, + TargetPredictionEngine, + TargetPredictionEngineClassic, +) +from weathergen.model.layers import MLP, NamedLinear +from weathergen.model.parametrised_prob_dist import LatentInterpolator +from weathergen.model.utils import get_num_parameters +from weathergen.utils.distributed import is_root +from weathergen.utils.utils import get_dtype + +logger = logging.getLogger(__name__) + + +class ModelParams(torch.nn.Module): + """Creation of query and embedding parameters of the model.""" + + def __init__(self, cf) -> None: + super(ModelParams, self).__init__() + + self.cf = cf + + self.healpix_level = cf.healpix_level + self.num_healpix_cells = 12 * 4**cf.healpix_level + self.dtype = get_dtype(cf.attention_dtype) + + bs = cf.batch_size_per_gpu + nqs = 9 + s = [bs, self.num_healpix_cells, cf.ae_local_num_queries, cf.ae_global_dim_embed] + + ### POSITIONAL EMBEDDINGS ### + len_token_seq = 1024 + self.pe_embed = torch.nn.Parameter( + torch.zeros(len_token_seq, cf.ae_local_dim_embed, dtype=self.dtype), requires_grad=False + ) + + pe = torch.zeros( + self.num_healpix_cells, + cf.ae_local_num_queries, + cf.ae_global_dim_embed, + dtype=self.dtype, + ) + self.pe_global = torch.nn.Parameter(pe, requires_grad=False) + + ### HEALPIX NEIGHBOURS ### + hlc = self.healpix_level + with warnings.catch_warnings(action="ignore"): + temp = hp.neighbours( + np.arange(self.num_healpix_cells), 2**hlc, order="nested" + ).transpose() + # fix missing nbors with references to self + for i, row in enumerate(temp): + temp[i][row == -1] = i + self.hp_nbours = torch.nn.Parameter( + torch.empty((temp.shape[0], (temp.shape[1] + 1)), dtype=torch.int32), + requires_grad=False, + ) + # self.hp_nbours = torch.empty((temp.shape[0], (temp.shape[1] + 1)), dtype=torch.int32) + + if cf.target_cell_local_prediction: + tokens_lens_value = nqs * s[2] + else: + tokens_lens_value = nqs * s[1] * s[2] + self.tokens_lens = torch.nn.Parameter( + tokens_lens_value * torch.ones(bs * s[1] + 1, dtype=torch.int32), requires_grad=False + ) + self.tokens_lens.data[0] = 0 + + self.q_cells_lens = torch.nn.Parameter( + torch.ones(self.num_healpix_cells + 1, dtype=torch.int32), requires_grad=False + 
) + self.q_cells_lens.data[0] = 0 + + def create(self, cf: Config) -> "ModelParams": + self.reset_parameters(cf) + return self + + def reset_parameters(self, cf: Config) -> "ModelParams": + """Creates the positional embedding applied per stream after stream embedding, the + cell-level positional embedding used for local assimilation, the query lengths for the + local-to-global adapter, and the HEALPix neighbourhood based parameters used for target + prediction. + + Sinusoidal positional encoding: harmonic encoding based on sine and cosine, applied both + per stream (after stream embedding) and per cell (for local assimilation). + + HEALPix neighbourhood structure: determine the neighbours of each cell and initialize each + entry with the cell's own number as well as the numbers of its neighbours. If a cell has + fewer than eight neighbours, its own number fills the remaining slots. + + Query-length based parameters: precompute the token length at each cell as required after + local assimilation. + + Args: + cf : Configuration + """ + + # positional encodings + + dim_embed = cf.ae_local_dim_embed + len_token_seq = 1024 + self.pe_embed.data.fill_(0.0) + position = torch.arange(0, len_token_seq, device=self.pe_embed.device).unsqueeze(1) + div = torch.exp( + torch.arange(0, dim_embed, 2, device=self.pe_embed.device) + * -(math.log(len_token_seq) / dim_embed), + ) + self.pe_embed.data[:, 0::2] = torch.sin(position * div[: self.pe_embed[:, 0::2].shape[1]]) + self.pe_embed.data[:, 1::2] = torch.cos(position * div[: self.pe_embed[:, 1::2].shape[1]]) + + dim_embed = cf.ae_global_dim_embed + self.pe_global.data.fill_(0.0) + xs = 2.0 * np.pi * torch.arange(0, dim_embed, 2, device=self.pe_global.device) / dim_embed + self.pe_global.data[..., 0::2] = 0.5 * torch.sin( + torch.outer(8 * torch.arange(cf.ae_local_num_queries, device=self.pe_global.device), xs) + ) + self.pe_global.data[..., 0::2] += ( + torch.sin( + torch.outer(torch.arange(self.num_healpix_cells, device=self.pe_global.device), xs) + ) + .unsqueeze(1) + .repeat((1, cf.ae_local_num_queries, 1)) + ) + self.pe_global.data[..., 1::2] = 0.5 * torch.cos( + torch.outer(8 * torch.arange(cf.ae_local_num_queries, device=self.pe_global.device), xs) + ) + self.pe_global.data[..., 1::2] += ( + torch.cos( + torch.outer(torch.arange(self.num_healpix_cells, device=self.pe_global.device), xs) + ) + .unsqueeze(1) + .repeat((1, cf.ae_local_num_queries, 1)) + ) + + # healpix neighborhood structure + + hlc = self.healpix_level + num_healpix_cells = self.num_healpix_cells + with warnings.catch_warnings(action="ignore"): + temp = hp.neighbours(np.arange(num_healpix_cells), 2**hlc, order="nested").transpose() + # fix missing nbors with references to self + for i, row in enumerate(temp): + temp[i][row == -1] = i + # nbors *and* self + self.hp_nbours.data[:, 0] = torch.arange(temp.shape[0], device=self.hp_nbours.device) + self.hp_nbours.data[:, 1:] = torch.from_numpy(temp).to(self.hp_nbours.device) + + # varlen index set for tokens + assert cf.batch_size_per_gpu == cf.batch_size_validation_per_gpu + bs = cf.batch_size_per_gpu + nqs = 9 + s = [bs, self.num_healpix_cells, cf.ae_local_num_queries, cf.ae_global_dim_embed] + if cf.target_cell_local_prediction: + tokens_lens_value = nqs * s[2] + else: + tokens_lens_value = nqs * s[1] * s[2] + self.tokens_lens.data.fill_(tokens_lens_value) + self.tokens_lens.data[0] = 0 + + # precompute for varlen attention + self.q_cells_lens.data.fill_(1) + 
self.q_cells_lens.data[0] = 0 + + # ensure all params have grad set to False + + return - ######################################### - def assimilate_local( self, model_params, tokens, cell_lens) : - batch_size = self.cf.batch_size if self.training else self.cf.batch_size_validation - - s = self.q_cells.shape - # print( f'{np.prod(np.array(tokens.shape))} :: {np.prod(np.array(s))}' - # + ':: {np.prod(np.array(tokens.shape))/np.prod(np.array(s))}') - # TODO: test if positional encoding is needed here - if self.cf.ae_local_queries_per_cell : - tokens_global = (self.q_cells + model_params.pe_global).repeat( batch_size, 1, 1) - else : - tokens_global = self.q_cells.repeat( self.num_healpix_cells, 1, 1) + model_params.pe_global - q_cells_lens = torch.cat( [model_params.q_cells_lens[0].unsqueeze(0)] + [model_params.q_cells_lens[1:] - for _ in range(batch_size)] ) - - # # local assimilation model - # for block in self.ae_local_blocks : - # tokens = checkpoint( block, tokens, cell_lens, use_reentrant=False) - - # for block in self.ae_adapter : - # tokens_global = checkpoint( block, tokens_global, tokens, q_cells_lens, cell_lens, use_reentrant=False) - - # work around to bug in flash attention for hl>=5 - - cell_lens = cell_lens[1:] - clen = self.num_healpix_cells // (2 if self.cf.healpix_level<=5 else 8) - tokens_global_all = [] - zero_pad = torch.zeros( 1, device='cuda', dtype=torch.int32) - for i in range( ((cell_lens.shape[0]) // clen)) : - - # make sure we properly catch all elements in last chunk - i_end = (i+1)*clen if i < (cell_lens.shape[0] // clen)-1 else cell_lens.shape[0] - l0, l1 = (0 if i==0 else cell_lens[:i*clen].cumsum(0)[-1]), cell_lens[:i_end].cumsum(0)[-1] - - tokens_c = tokens[l0:l1] - tokens_global_c = tokens_global[ i*clen : i_end ] - cell_lens_c = torch.cat( [ zero_pad, cell_lens[ i*clen : i_end ] ]) - q_cells_lens_c = q_cells_lens[ : cell_lens_c.shape[0] ] - - if l0 == l1 or tokens_c.shape[0]==0: - tokens_global_all += [ tokens_global_c ] - continue - - for block in self.ae_local_blocks : - tokens_c = checkpoint( block, tokens_c, cell_lens_c, use_reentrant=False) - - for block in self.ae_adapter : - tokens_global_c = checkpoint( block, tokens_global_c, tokens_c, q_cells_lens_c, cell_lens_c, - use_reentrant=False) - - tokens_global_all += [tokens_global_c] - - tokens_global = torch.cat( tokens_global_all) - - # recover batch dimension and build global token list - tokens_global = (tokens_global.reshape( [batch_size, self.num_healpix_cells, s[-2], s[-1]]) + model_params.pe_global).flatten(1,2) - - return tokens_global - - ######################################### - # @torch.compile - def assimilate_global( self, model_params, tokens) : - - # global assimilation engine and adapter - for block in self.ae_global_blocks : - tokens = checkpoint( block, tokens, use_reentrant=False) - - return tokens - - ######################################### - # @torch.compile - def forecast( self, model_params, tokens) : - - for it, block in enumerate(self.fe_blocks) : - aux_info = torch.tensor([it], dtype=torch.float32, device='cuda') - tokens = checkpoint( block, tokens, aux_info, use_reentrant=False) - - return tokens - - ######################################### - def predict( self, model_params, fstep, tokens, tcs, target_coords_lens, target_coords_idxs) : - - fp32, i32 = torch.float32, torch.int32 - batch_size = self.cf.batch_size if self.training else self.cf.batch_size_validation - - s = [batch_size, self.num_healpix_cells, self.cf.ae_local_num_queries, tokens.shape[-1]] - tokens_stream 
= (tokens.reshape( s) + model_params.pe_global).flatten(0,1) - tokens_stream = tokens_stream[ model_params.hp_nbours.flatten() ].flatten(0,1) - - # pair with tokens from assimilation engine to obtain target tokens - preds_tokens = [] - for ii, (tte, tte_kv) in enumerate( zip( self.target_token_engines, self.pred_adapter_kv)) : - - si = self.cf.streams[ii] - tro_type = si['target_readout']['type'] if 'type' in si['target_readout'] else 'token' - tc_embed = self.embed_target_coords[ii] - - assert batch_size == 1 - - # embed token coords, concatenating along batch dimension (which is taking care of through - # the varlen attention) - if tro_type == 'obs_value' : - tc_tokens = torch.cat([checkpoint( tc_embed, tcs[fstep][i_b][ii], use_reentrant=False) - if len(tcs[fstep][i_b][ii].shape)>1 else tcs[fstep][i_b][ii] - for i_b in range(len(tcs[fstep]))]) - elif tro_type == 'token' : - tc_tokens = torch.cat( [checkpoint( tc_embed, tcs[fstep][i_b][ii].transpose(-2,-1).flatten(-2,-1), - use_reentrant=False) - if len(tcs[fstep][i_b][ii].shape)>1 else tcs[fstep][i_b][ii] - for i_b in range(len(tcs[fstep]))]) - else : - assert False - - if torch.isnan(tc_tokens).any() : - nn = si['name'] - logger.warning( f'Skipping prediction for {nn} because of {torch.isnan(tc_tokens).sum()} NaN in tc_tokens.') - preds_tokens += [ torch.tensor( [], device=tc_tokens.device) ] - continue - if tc_tokens.shape[0] == 0 : - preds_tokens += [ torch.tensor( [], device=tc_tokens.device) ] - continue - - # TODO: how to support tte_kv efficiently, generate 1-ring neighborhoods here or on a per - # stream basis - assert type(tte_kv) == torch.nn.Identity - - tcs_lens = target_coords_idxs[0][fstep][ii] - # add per-cell positional encoding - tc_tokens += model_params.pe_tc_tokens[ target_coords_idxs[1][fstep][ii] , : tc_tokens.shape[1] ] - - # coord information for learnable layer norm - tcs_aux = torch.cat( [tcs[fstep][i_b][ii] for i_b in range(len(tcs[0]))]) - - # apply prediction engine - for ib, block in enumerate(tte) : - if self.cf.pred_self_attention and ib % 3 == 1 : - tc_tokens = checkpoint( block, tc_tokens, tcs_lens, tcs_aux, use_reentrant=False) - else : - tc_tokens = checkpoint( block, tc_tokens, tokens_stream, - tcs_lens, model_params.tokens_lens, tcs_aux, - use_reentrant=False) - - # final prediction head to map back to physical space - preds_tokens += [ checkpoint( self.pred_heads[ii], tc_tokens, use_reentrant=False) ] - - return preds_tokens +#################################################################################################### +class Model(torch.nn.Module): + """WeatherGenerator model architecture + + WeatherGenerator consists of the following components: + + embeds: embedding networks: Stream specific embedding networks. + + ae_local_blocks: Local assimilation engine: transformer based network to combine different input + streams per healpix cell. + + ae_adapter: Assimilation engine adapter: Adapter to transform local assimilation engine + information to the global assimilation engine. + + ae_global_blocks: Global assimilation engine: Transformer network alternating between local and + global attention based upon global attention density rate. + + fe_blocks: Forecasting engine: Transformer network using the output of global attention to + advance the latent representation in time. + + embed_target_coords: Embedding networks for coordinates: Initializes embedding networks tailored + for metadata embedded target coordinates. 
The architecture is either a linear layer or a + multi-layer perceptron, determined by the configuration of the embedding target coordinate + networks. + + pred_adapter_kv: Prediction adapter: Adapter to transform the global assimilation/forecasting + engine output to the prediction engine. Uses an MLP if `cf.pred_adapter_kv` is True, + otherwise it uses an identity function. + + target_token_engines: Prediction engine: Transformer based prediction network that generates + output corresponding to target coordinates. + + pred_heads: Prediction head: Final layers using the target token engines' output to map target + coordinates back to physical space. + """ + + ######################################### + def __init__(self, cf: Config, sources_size, targets_num_channels, targets_coords_size): + """ + Args: + cf : Configuration with model parameters + sources_size : List with the number of input channels per stream + targets_num_channels : List with size of each output sample for coordinates target + embedding + targets_coords_size : List with size of each input sample for coordinates target + embedding + """ + super(Model, self).__init__() + + self.healpix_level = cf.healpix_level + self.num_healpix_cells = 12 * 4**self.healpix_level + + self.cf = cf + self.dtype = get_dtype(self.cf.attention_dtype) + self.sources_size = sources_size + self.targets_num_channels = targets_num_channels + self.targets_coords_size = targets_coords_size + + ######################################### + def create(self) -> "Model": + """Create each individual module of the model""" + cf = self.cf + + # separate embedding networks for different observation types + self.embed_engine = EmbeddingEngine(cf, self.sources_size) + + ############## + # local assimilation engine + self.ae_local_engine = LocalAssimilationEngine(cf) + + if cf.latent_noise_kl_weight > 0.0: + self.interpolate_latents = LatentInterpolator( + gamma=cf.latent_noise_gamma, + dim=cf.ae_local_dim_embed, + use_additive_noise=cf.latent_noise_use_additive_noise, + deterministic=cf.latent_noise_deterministic_latents, + ) + + ############## + # local -> global assimilation engine adapter + self.ae_local_global_engine = Local2GlobalAssimilationEngine(cf) + + ############## + # learnable queries + if cf.ae_local_queries_per_cell: + s = (self.num_healpix_cells, cf.ae_local_num_queries, cf.ae_global_dim_embed) + q_cells = torch.rand(s, requires_grad=True) / cf.ae_global_dim_embed + # add meta data + q_cells[:, :, -8:-6] = ( + (torch.arange(self.num_healpix_cells) / self.num_healpix_cells) + .unsqueeze(1) + .unsqueeze(1) + .repeat((1, cf.ae_local_num_queries, 2)) + ) + theta, phi = healpy.pix2ang( + nside=2**self.healpix_level, ipix=torch.arange(self.num_healpix_cells) + ) + q_cells[:, :, -6:-3] = ( + torch.cos(theta).unsqueeze(1).unsqueeze(1).repeat((1, cf.ae_local_num_queries, 3)) + ) + q_cells[:, :, -3:] = ( + torch.sin(phi).unsqueeze(1).unsqueeze(1).repeat((1, cf.ae_local_num_queries, 3)) + ) + q_cells[:, :, -9] = torch.arange(cf.ae_local_num_queries) + q_cells[:, :, -10] = torch.arange(cf.ae_local_num_queries) + else: + s = (1, cf.ae_local_num_queries, cf.ae_global_dim_embed) + q_cells = torch.rand(s, requires_grad=True) / cf.ae_global_dim_embed + self.q_cells = torch.nn.Parameter(q_cells, requires_grad=True) + + ############## + # global assimilation engine + self.ae_global_engine = GlobalAssimilationEngine(cf, self.num_healpix_cells) + + ############### + # forecasting engine + if isinstance(cf.forecast_steps, int): + assert not (cf.forecast_steps > 0 and
cf.fe_num_blocks == 0), ( + "Empty forecast engine (fe_num_blocks = 0), but forecast_steps > 0" + ) + else: + assert not (min(cf.forecast_steps) > 0 and cf.fe_num_blocks == 0), ( + "Empty forecast engine (fe_num_blocks = 0), but forecast_steps[i] > 0 for some i" + ) + + self.forecast_engine = ForecastingEngine(cf, self.num_healpix_cells) + + ############### + # embed coordinates yielding one query token for each target token + dropout_rate = cf.embed_dropout_rate + self.embed_target_coords = torch.nn.ModuleList() + self.target_token_engines = torch.nn.ModuleList() + self.pred_adapter_kv = torch.nn.ModuleList() + self.pred_heads = torch.nn.ModuleList() + + for i_obs, si in enumerate(cf.streams): + stream_name = si.get("name", i_obs) + + # extract and setup relevant parameters + etc = si["embed_target_coords"] + tro_type = si["target_readout"]["type"] if "type" in si["target_readout"] else "token" + dim_embed = si["embed_target_coords"]["dim_embed"] + dim_out = max( + dim_embed, + si["token_size"] * self.targets_num_channels[i_obs], + ) + tr = si["target_readout"] + num_layers = tr["num_layers"] + tr_mlp_hidden_factor = tr["mlp_hidden_factor"] if "mlp_hidden_factor" in tr else 2 + tr_dim_head_proj = tr["dim_head_proj"] if "dim_head_proj" in tr else None + softcap = tr["softcap"] if "softcap" in tr else 0.0 + + if tro_type == "obs_value": + # fixed dimension for obs_value type + dims_embed = [si["embed_target_coords"]["dim_embed"] for _ in range(num_layers + 1)] + else: + if cf.pred_dyadic_dims: + coord_dim = self.geoinfo_sizes[i_obs] * si["token_size"] + dims_embed = torch.tensor( + [dim_out // 2**i for i in range(num_layers - 1, -1, -1)] + [dim_out] + ) + dims_embed[dims_embed < coord_dim] = dims_embed[ + torch.where(dims_embed >= coord_dim)[0][0] + ] + dims_embed = dims_embed.tolist() + else: + dims_embed = torch.linspace( + dim_embed, dim_out, num_layers + 1, dtype=torch.int32 + ).tolist() + + if is_root(): + logger.info("{} :: coord embed: :: {}".format(si["name"], dims_embed)) + + dim_coord_in = self.targets_coords_size[i_obs] + + # embedding network for coordinates + if etc["net"] == "linear": + self.embed_target_coords.append( + NamedLinear( + f"embed_target_coords_{stream_name}", + in_features=dim_coord_in, + out_features=dims_embed[0], + bias=False, + ) + ) + elif etc["net"] == "mlp": + self.embed_target_coords.append( + MLP( + dim_coord_in, + dims_embed[0], + hidden_factor=8, + with_residual=False, + dropout_rate=dropout_rate, + norm_eps=self.cf.mlp_norm_eps, + stream_name=f"embed_target_coords_{stream_name}", + ) + ) + else: + assert False + + # obs-specific adapter for tokens + if cf.pred_adapter_kv: + self.pred_adapter_kv.append( + MLP( + cf.ae_global_dim_embed, + cf.ae_global_dim_embed, + hidden_factor=2, + with_residual=True, + dropout_rate=dropout_rate, + norm_type=cf.norm_type, + norm_eps=self.cf.mlp_norm_eps, + stream_name=f"pred_adapter_kv_{stream_name}", + ) + ) + else: + self.pred_adapter_kv.append(torch.nn.Identity()) + + # target prediction engines + tte_version = ( + TargetPredictionEngine + if cf.decoder_type != "PerceiverIOCoordConditioning" + else TargetPredictionEngineClassic + ) + tte = tte_version( + cf, + dims_embed, + dim_coord_in, + tr_dim_head_proj, + tr_mlp_hidden_factor, + softcap, + tro_type, + stream_name=stream_name, + ) + + self.target_token_engines.append(tte) + + # ensemble prediction heads to provide probabilistic prediction + final_activation = si["pred_head"].get("final_activation", "Identity") + if is_root(): + logger.debug( + 
f"{final_activation} activation of prediction head of {si['name']} stream" + ) + self.pred_heads.append( + EnsPredictionHead( + dims_embed[-1], + self.targets_num_channels[i_obs], + si["pred_head"]["num_layers"], + si["pred_head"]["ens_size"], + norm_type=cf.norm_type, + final_activation=final_activation, + stream_name=stream_name, + ) + ) + + return self + + def reset_parameters(self): + def _reset_params(module): + if isinstance(module, nn.Linear | nn.LayerNorm): + module.reset_parameters() + else: + pass + + self.apply(_reset_params) + + ######################################### + def print_num_parameters(self) -> None: + """Print the number of parameters for the entire model and for each module used to build it""" + + cf = self.cf + num_params_embed = [get_num_parameters(embed) for embed in self.embed_engine.embeds] + num_params_total = get_num_parameters(self) + num_params_ae_local = get_num_parameters(self.ae_local_engine.ae_local_blocks) + num_params_ae_global = get_num_parameters(self.ae_global_engine.ae_global_blocks) + + num_params_q_cells = np.prod(self.q_cells.shape) if self.q_cells.requires_grad else 0 + num_params_ae_adapter = get_num_parameters(self.ae_local_global_engine.ae_adapter) + + num_params_fe = get_num_parameters(self.forecast_engine.fe_blocks) + + num_params_pred_adapter = [get_num_parameters(kv) for kv in self.pred_adapter_kv] + num_params_embed_tcs = [get_num_parameters(etc) for etc in self.embed_target_coords] + num_params_tte = [get_num_parameters(tte) for tte in self.target_token_engines] + num_params_preds = [get_num_parameters(head) for head in self.pred_heads] + + print("-----------------") + print(f"Total number of trainable parameters: {num_params_total:,}") + print("Number of parameters:") + print(" Embedding networks:") + [ + print(" {} : {:,}".format(si["name"], np)) + for si, np in zip(cf.streams, num_params_embed, strict=False) + ] + print(f" Local assimilation engine: {num_params_ae_local:,}") + print(f" Local-global adapter: {num_params_ae_adapter:,}") + print(f" Learnable queries: {num_params_q_cells:,}") + print(f" Global assimilation engine: {num_params_ae_global:,}") + print(f" Forecast engine: {num_params_fe:,}") + print(" kv-adapter, coordinate embedding, prediction networks and prediction heads:") + zps = zip( + cf.streams, + num_params_pred_adapter, + num_params_embed_tcs, + num_params_tte, + num_params_preds, + strict=False, + ) + [ + print(" {} : {:,} / {:,} / {:,} / {:,}".format(si["name"], np0, np1, np2, np3)) + for si, np0, np1, np2, np3 in zps + ] + print("-----------------") + + ######################################### + def rename_old_state_dict(self, params: dict) -> dict: + """Checks if the model in the checkpoint is from the old model version and if so renames + the parameters to match the new model version. + + Args: + params : Dictionary with (old) model parameters from checkpoint + Returns: + new_params : Dictionary with (renamed) model parameters + """ + params_cleanup = { + "embeds": "embed_engine.embeds", # EmbeddingEngine + "ae_local_blocks": "ae_local_engine.ae_local_blocks", # LocalAssimilationEngine + "ae_adapter": "ae_local_global_engine.ae_adapter", # Local2GlobalAssimilationEngine + "ae_global_blocks": "ae_global_engine.ae_global_blocks", # GlobalAssimilationEngine + "fe_blocks": "forecast_engine.fe_blocks", # ForecastingEngine + } + + new_params = {} + + for k, v in params.items(): + new_k = k + prefix = "" + + # Strip "module."
(prefix for DataParallel or DistributedDataParallel) + if new_k.startswith("module."): + prefix = "module." + new_k = new_k[len(prefix) :] + + first_w, rest = new_k.split(".", 1) if "." in new_k else (new_k, "") + # Only check first word (root level modules) to avoid false matches. + if first_w in params_cleanup: + new_k = params_cleanup[first_w] + "." + rest + + new_k = prefix + new_k + new_params[new_k] = v + + return new_params + + ######################################### + def forward(self, model_params: ModelParams, batch, forecast_offset: int, forecast_steps: int): + """Performs the forward pass of the model to generate forecasts + + Tokens are processed through the model components, which were defined in the create method. + Args: + model_params : Query and embedding parameters + batch : + streams_data : Contains tokenized source data and target data for each dataset and + each stream + source_cell_lens : Used to identify range of tokens to use from generated tokens in + cell embedding + target_coords_idxs : Indices of target coordinates for each dataset. + forecast_offset : Starting index for iteration + forecast_steps : Number of forecast steps to calculate from forecast_offset + Returns: + A list containing all prediction results + """ + + (streams_data, source_cell_lens, target_coords_idxs) = batch + + # embed + tokens = self.embed_cells(model_params, streams_data) + + # local assimilation engine and adapter + tokens, posteriors = self.assimilate_local(model_params, tokens, source_cell_lens) + + tokens = self.assimilate_global(model_params, tokens) + + # roll-out in latent space + preds_all = [] + for fstep in range(forecast_offset, forecast_offset + forecast_steps): + # prediction + preds_all += [ + self.predict( + model_params, + fstep, + tokens, + streams_data, + target_coords_idxs, + ) + ] + + if self.training: + # Impute noise to the latent state + noise_std = self.cf.get("impute_latent_noise_std", 0.0) + if noise_std > 0.0: + tokens = tokens + torch.randn_like(tokens) * torch.norm(tokens) * noise_std + + tokens = self.forecast(model_params, tokens, fstep) + + # prediction for final step + preds_all += [ + self.predict( + model_params, + forecast_offset + forecast_steps, + tokens, + streams_data, + target_coords_idxs, + ) + ] + + return preds_all, posteriors + + ######################################### + def embed_cells(self, model_params: ModelParams, streams_data) -> torch.Tensor: + """Embeds input data for each stream separately and rearranges it to cell-wise order + Args: + model_params : Query and embedding parameters + streams_data : Used to initialize first tokens for pre-processing + Returns: + Tokens for local assimilation + """ + + device = next(self.parameters()).device + tokens_all = self.embed_engine(streams_data, model_params.pe_embed, self.dtype, device) + + return tokens_all + + ######################################### + def assimilate_local( + self, model_params: ModelParams, tokens: torch.Tensor, cell_lens: torch.Tensor + ) -> torch.Tensor: + """Processes embedded tokens locally and prepares them for the global assimilation + Args: + model_params : Query and embedding parameters + tokens : Input tokens to be processed by local assimilation + cell_lens : Used to identify range of tokens to use from generated tokens in cell + embedding + Returns: + Tokens for global assimilation + """ + + batch_size = ( + self.cf.batch_size_per_gpu if self.training else self.cf.batch_size_validation_per_gpu + ) + + s = self.q_cells.shape + # print( 
f'{np.prod(np.array(tokens.shape))} :: {np.prod(np.array(s))}' + # + ':: {np.prod(np.array(tokens.shape))/np.prod(np.array(s))}') + # TODO: test if positional encoding is needed here + if self.cf.ae_local_queries_per_cell: + tokens_global = (self.q_cells + model_params.pe_global).repeat(batch_size, 1, 1) + else: + tokens_global = ( + self.q_cells.repeat(self.num_healpix_cells, 1, 1) + model_params.pe_global + ) + q_cells_lens = torch.cat( + [model_params.q_cells_lens[0].unsqueeze(0)] + + [model_params.q_cells_lens[1:] for _ in range(batch_size)] + ) + + # local assimilation model + # for block in self.ae_local_blocks: + # tokens = checkpoint(block, tokens, cell_lens, use_reentrant=False) + + # if self.cf.latent_noise_kl_weight > 0.0: + # tokens, posteriors = self.interpolate_latents.interpolate_with_noise( + # tokens, sampling=self.training + # ) + # else: + # tokens, posteriors = tokens, 0.0 + + # for block in self.ae_adapter: + # tokens_global = checkpoint( + # block, + # tokens_global, + # tokens, + # q_cells_lens, + # cell_lens, + # use_reentrant=False, + # ) + + # work around to bug in flash attention for hl>=5 + + cell_lens = cell_lens[1:] + clen = self.num_healpix_cells // (2 if self.cf.healpix_level <= 5 else 8) + tokens_global_all = [] + posteriors = [] + zero_pad = torch.zeros(1, device=tokens.device, dtype=torch.int32) + for i in range((cell_lens.shape[0]) // clen): + # make sure we properly catch all elements in last chunk + i_end = (i + 1) * clen if i < (cell_lens.shape[0] // clen) - 1 else cell_lens.shape[0] + l0, l1 = ( + (0 if i == 0 else cell_lens[: i * clen].cumsum(0)[-1]), + cell_lens[:i_end].cumsum(0)[-1], + ) + + tokens_c = tokens[l0:l1] + tokens_global_c = tokens_global[i * clen : i_end] + cell_lens_c = torch.cat([zero_pad, cell_lens[i * clen : i_end]]) + q_cells_lens_c = q_cells_lens[: cell_lens_c.shape[0]] + + if l0 == l1 or tokens_c.shape[0] == 0: + tokens_global_all += [tokens_global_c] + continue + + # local assimilation model + tokens_c = self.ae_local_engine(tokens_c, cell_lens_c, use_reentrant=False) + + if self.cf.latent_noise_kl_weight > 0.0: + tokens_c, posteriors_c = self.interpolate_latents.interpolate_with_noise( + tokens_c, sampling=self.training + ) + posteriors += [posteriors_c] + else: + tokens_c, posteriors = tokens_c, 0.0 + + tokens_global_c = self.ae_local_global_engine( + tokens_c, tokens_global_c, q_cells_lens_c, cell_lens_c, use_reentrant=False + ) + + tokens_global_all += [tokens_global_c] + + tokens_global = torch.cat(tokens_global_all) + + # recover batch dimension and build global token list + tokens_global = ( + tokens_global.reshape([batch_size, self.num_healpix_cells, s[-2], s[-1]]) + + model_params.pe_global + ).flatten(1, 2) + + return tokens_global, posteriors + + ######################################### + def assimilate_global(self, model_params: ModelParams, tokens: torch.Tensor) -> torch.Tensor: + """Performs transformer based global assimilation in latent space + Args: + model_params : Query and embedding parameters (never used) + tokens : Input tokens to be pre-processed by global assimilation + Returns: + Latent representation of the model + """ + + # global assimilation engine and adapter + tokens = self.ae_global_engine(tokens, use_reentrant=False) + + return tokens + + ######################################### + def forecast(self, model_params: ModelParams, tokens: torch.Tensor, fstep: int) -> torch.Tensor: + """Advances latent space representation in time + + Args: + model_params : Query and embedding parameters (never 
used) + tokens : Input tokens to be processed by the model. + fstep: Current forecast step index (can be used as aux info). + Returns: + Processed tokens + Raises: + ValueError: For unexpected arguments in checkpoint method + """ + + tokens = self.forecast_engine(tokens, fstep) + + return tokens + + ######################################### + def predict( + self, + model_params: ModelParams, + fstep: int, + tokens: torch.Tensor, + streams_data, + target_coords_idxs, + ) -> list[torch.Tensor]: + """Predicts outputs at the specified target coordinates based on the input weather state + and pre-training task, and projects the latent space representation back to physical space. + + Args: + model_params : Query and embedding parameters + fstep : Current forecast step index + tokens : Tokens from global assimilation engine + streams_data : Used to initialize target coordinates tokens and index information + target_coords_idxs : Indices of target coordinates + Returns: + Prediction output tokens in physical representation for each set of target coordinates. + """ + + batch_size = ( + self.cf.batch_size_per_gpu if self.training else self.cf.batch_size_validation_per_gpu + ) + + s = [batch_size, self.num_healpix_cells, self.cf.ae_local_num_queries, tokens.shape[-1]] + tokens_stream = (tokens.reshape(s) + model_params.pe_global).flatten(0, 1) + tokens_stream = tokens_stream[model_params.hp_nbours.flatten()].flatten(0, 1) + + # pair with tokens from assimilation engine to obtain target tokens + preds_tokens = [] + for ii, (tte, tte_kv) in enumerate( + zip(self.target_token_engines, self.pred_adapter_kv, strict=False) + ): + si = self.cf.streams[ii] + tc_embed = self.embed_target_coords[ii] + + assert batch_size == 1 + + ## embed token coords, concatenating along batch dimension + # (which is taken care of by the varlen attention) + # arguably we should adhere to the mixed precision policy when creating the model in FSDP + tc_tokens = torch.cat( + [ + checkpoint( + tc_embed, + streams_data[i_b][ii].target_coords[fstep], + use_reentrant=False, + ) + if len(streams_data[i_b][ii].target_coords[fstep].shape) > 1 + else streams_data[i_b][ii].target_coords[fstep] + for i_b in range(len(streams_data)) + ] + ) + + # skip when the coordinate embedding yields NaN (i.e. the coord embedding network diverged) + if torch.isnan(tc_tokens).any(): + nn = si["name"] + if is_root(): + logger.warning( + f"Skipping prediction for {nn} because" + f" of {torch.isnan(tc_tokens).sum()} NaN in tc_tokens." + ) + preds_tokens += [torch.tensor([], device=tc_tokens.device)] + continue + + # skip empty lengths + if tc_tokens.shape[0] == 0: + preds_tokens += [torch.tensor([], device=tc_tokens.device)] + continue + + # TODO: how to support tte_kv efficiently, + # generate 1-ring neighborhoods here or on a per stream basis + assert isinstance(tte_kv, torch.nn.Identity) + + # lens for varlen attention + tcs_lens = target_coords_idxs[ii][fstep] + # coord information for learnable layer norm + tcs_aux = torch.cat( + [streams_data[i_b][ii].target_coords[fstep] for i_b in range(len(streams_data))] + ) + + tc_tokens = tte( + latent=tokens_stream, + output=tc_tokens, + latent_lens=model_params.tokens_lens, + output_lens=tcs_lens, + coordinates=tcs_aux, + ) + + # final prediction head to map back to physical space + preds_tokens += [checkpoint(self.pred_heads[ii], tc_tokens, use_reentrant=False)] + + return preds_tokens diff --git a/src/weathergen/model/norms.py b/src/weathergen/model/norms.py index 094f56c4e..4ecbfa80a 100644 --- a/src/weathergen/model/norms.py +++ b/src/weathergen/model/norms.py @@ -1,4 +1,4 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -7,10 +7,11 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. -import code import torch -from typing import Dict, Optional, Tuple +import torch.nn as nn +import torch.nn.functional as F + # from https://github.com/meta-llama/llama/blob/main/llama/model.py class RMSNorm(torch.nn.Module): @@ -20,7 +21,8 @@ def __init__(self, dim: int, eps: float = 1e-6): Args: dim (int): The dimension of the input tensor. - eps (float, optional): A small value added to the denominator for numerical stability. Default is 1e-6. + eps (float, optional): A small value added to the denominator for numerical stability. + Default is 1e-6. Attributes: eps (float): A small value added to the denominator for numerical stability.
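The RMSNorm class touched by the hunk above follows the llama reference implementation cited in norms.py; since the hunk only rewraps its docstring, here is a minimal standalone sketch of the standard RMSNorm computation for orientation (illustrative, not the repository class itself):

import torch

class RMSNormSketch(torch.nn.Module):
    def __init__(self, dim: int, eps: float = 1e-6):
        super().__init__()
        self.eps = eps
        self.weight = torch.nn.Parameter(torch.ones(dim))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # scale by the reciprocal root-mean-square of the features,
        # then by a learned per-feature gain
        rms = torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)
        return x * rms * self.weight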
@@ -65,28 +67,107 @@ class AdaLayerNorm(torch.nn.Module): """ def __init__( - self, - dim_embed_x, - dim_aux, - norm_elementwise_affine: bool = False, - norm_eps: float = 1e-5 + self, dim_embed_x, dim_aux, norm_elementwise_affine: bool = False, norm_eps: float = 1e-5 ): super().__init__() # simple 2-layer MLP for embedding auxiliary information self.embed_aux = torch.nn.ModuleList() - self.embed_aux.append( torch.nn.Linear( dim_aux, 4*dim_aux)) - self.embed_aux.append( torch.nn.SiLU()) - self.embed_aux.append( torch.nn.Linear( 4*dim_aux, 2*dim_embed_x)) - - self.norm = torch.nn.LayerNorm( dim_embed_x, norm_eps, norm_elementwise_affine) + self.embed_aux.append(torch.nn.Linear(dim_aux, 4 * dim_aux)) + self.embed_aux.append(torch.nn.SiLU()) + self.embed_aux.append(torch.nn.Linear(4 * dim_aux, 2 * dim_embed_x)) - def forward( self, x: torch.Tensor, aux: Optional[torch.Tensor] = None) -> torch.Tensor: + self.norm = torch.nn.LayerNorm(dim_embed_x, norm_eps, norm_elementwise_affine) - for block in self.embed_aux : - aux = block( aux) - scale, shift = aux.split( aux.shape[-1]//2, dim=-1) + def forward(self, x: torch.Tensor, aux: torch.Tensor | None = None) -> torch.Tensor: + for block in self.embed_aux: + aux = block(aux) + scale, shift = aux.split(aux.shape[-1] // 2, dim=-1) x = self.norm(x) * (1 + scale) + shift return x + + +def modulate(x, shift, scale): + return x * (1 + scale) + shift + + +class SwiGLU(nn.Module): + def __init__(self): + super(SwiGLU, self).__init__() + + def forward(self, x): + x1, x2 = x.chunk(2, dim=-1) + return x2 * F.silu(x1) + + +class AdaLayerNormLayer(torch.nn.Module): + """ + AdaLayerNorm for embedding auxiliary information as done in DiT (Peebles & Xie) with zero + initialisation https://arxiv.org/pdf/2212.09748 + + This module wraps a layer (e.g. self-attention or a feedforward network) and applies + LayerNorm followed by scale and shift before the layer, a gating scale after the layer, and + a final residual connection. + + layer is a function that takes two arguments: the first is the latent and the second is + the conditioning signal + """ + + def __init__( + self, + dim, + dim_aux, + layer, + norm_eps: float = 1e-6, + dropout_rate: float = 0.0, + ): + super().__init__() + + self.dim = dim + self.adaLN_modulation = nn.Sequential(nn.SiLU(), nn.Linear(dim_aux, 3 * dim, bias=True)) + + self.ln = nn.LayerNorm(dim, elementwise_affine=False, eps=norm_eps) + self.layer = layer + + # Initialize weights to zero for modulation and gating layers + self.initialise_weights() + + def initialise_weights(self): + nn.init.zeros_(self.adaLN_modulation[-1].weight) + nn.init.zeros_(self.adaLN_modulation[-1].bias) + + def forward(self, x: torch.Tensor, c: torch.Tensor, x_lens, **kwargs) -> torch.Tensor: + # the -1 in torch.repeat_interleave(..) is because x_lens is designed for use with flash + # attention and thus has a spurious 0 at the beginning to satisfy the flash attention api + shift, scale, gate = self.adaLN_modulation(c)[torch.repeat_interleave(x_lens) - 1].chunk( + 3, dim=1 + ) + kwargs["x_lens"] = x_lens + return ( + gate + * self.layer( + modulate( + self.ln(x), + shift, + scale, + ), + **kwargs, + ) + + x + ) + + +class SaturateEncodings(nn.Module): + """A common alternative to KL regularisation to prevent outliers in the latent space when + learning an auto-encoder for a latent generative model; an example value for the scale + factor is 5 + """ + + def __init__(self, scale_factor): + super().__init__() + + self.scale_factor_squared = scale_factor**2 + + def forward(self, x): + return x / torch.sqrt(1 + (x**2 / self.scale_factor_squared)) diff --git a/src/weathergen/model/parametrised_prob_dist.py b/src/weathergen/model/parametrised_prob_dist.py new file mode 100644 index 000000000..4b28f9b0a --- /dev/null +++ b/src/weathergen/model/parametrised_prob_dist.py @@ -0,0 +1,126 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + + +import numpy as np +import torch +import torch.nn as nn + +from weathergen.model.norms import SaturateEncodings + + +class DiagonalGaussianDistribution: + """ + Used to represent a learned Gaussian Distribution as typical in a VAE + Code taken and adapted from: https://github.com/Jiawei-Yang/DeTok/tree/main + """ + + def __init__(self, deterministic=False, channel_dim=1): + self.deterministic = deterministic + self.channel_dim = channel_dim + + def reset_parameters(self, parameters): + self.parameters = parameters.float() + self.mean, self.logvar = torch.chunk(parameters, 2, dim=self.channel_dim) + self.sum_dims = tuple(range(1, self.mean.dim())) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self): + x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.0]) + else: + if other is None: + return 0.5 * torch.sum( + torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, + dim=self.sum_dims, + ) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var + - 1.0 + - self.logvar + + other.logvar, + dim=self.sum_dims, + ) + + def nll(self, sample, dims=None): + if self.deterministic: + return torch.Tensor([0.0]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims or self.sum_dims, + ) + + def mode(self): + return self.mean + + +class LatentInterpolator(nn.Module): + """ + Code taken and adapted from: https://github.com/Jiawei-Yang/DeTok/tree/main + """ + + def __init__( + self, + gamma, + dim, + use_additive_noise=False, + deterministic=False, + saturate_encodings=None, + ): + super().__init__() + + assert deterministic or saturate_encodings is None, ( + "Cannot use saturate_encodings without deterministic" + ) + self.gamma = gamma + self.saturate_encodings = saturate_encodings + self.use_additive_noise = use_additive_noise + self.diag_gaussian = DiagonalGaussianDistribution( + deterministic=deterministic, channel_dim=-1 + ) + self.mean_and_var = nn.Sequential( + nn.Linear(dim, 2 * dim, bias=False), + SaturateEncodings(saturate_encodings) + if saturate_encodings is not None + else nn.Identity(), + ) + + def interpolate_with_noise(self, z, batch_size=1, sampling=False, noise_level=-1): + assert batch_size == 1, ( + "Given how we chunk in assimilate_local, dealing with batch_size greater than 1 is not " + + "supported at the moment" + ) + self.diag_gaussian.reset_parameters(self.mean_and_var(z)) + z_latents = self.diag_gaussian.sample() if sampling else self.diag_gaussian.mean + + if self.training and self.gamma > 0.0: + device = z_latents.device + s = z_latents.shape + if noise_level > 0.0: + noise_level_tensor = torch.full((batch_size,), noise_level, device=device) + else: + noise_level_tensor = torch.rand(batch_size, device=device) + noise = torch.randn(s, device=device) * self.gamma + if self.use_additive_noise: + z_latents = z_latents + noise_level_tensor * noise + else: + z_latents = (1 - noise_level_tensor) * z_latents + noise_level_tensor * noise + + return z_latents, self.diag_gaussian diff --git a/src/weathergen/model/positional_encoding.py b/src/weathergen/model/positional_encoding.py index 086656308..88df67fa3 100644 --- a/src/weathergen/model/positional_encoding.py +++ b/src/weathergen/model/positional_encoding.py @@ -1,4 +1,4 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -7,75 +7,90 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction.
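To make the VAE-style machinery in parametrised_prob_dist.py above concrete: DiagonalGaussianDistribution splits a parameter tensor into mean and log-variance along the channel dimension, samples by reparameterisation, and exposes the usual KL term, while LatentInterpolator blends the sampled latent with scaled noise. A self-contained sketch of these steps with stand-in tensors (shapes and values illustrative):

import torch

# network output interpreted as [mean | logvar] along the last dim,
# as in DiagonalGaussianDistribution with channel_dim=-1
params = torch.randn(4, 2 * 16)
mean, logvar = torch.chunk(params, 2, dim=-1)
logvar = torch.clamp(logvar, -30.0, 20.0)
std = torch.exp(0.5 * logvar)

sample = mean + std * torch.randn_like(mean)  # reparameterised z ~ N(mean, std^2)
kl = 0.5 * torch.sum(mean.pow(2) + logvar.exp() - 1.0 - logvar, dim=-1)

# multiplicative noise interpolation as in interpolate_with_noise:
# t = 0 keeps the latent, t = 1 is pure (gamma-scaled) noise
t, gamma = torch.rand(1), 0.5
z_noisy = (1 - t) * sample + t * gamma * torch.randn_like(sample)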
+import math + import numpy as np import torch -import math -import code + #################################################################################################### -def positional_encoding_harmonic( x) : - '''space time harmonic positional encoding''' +def positional_encoding_harmonic(x): + """space time harmonic positional encoding""" - dim_embed = x.shape[-1] - dev = x.device + dim_embed = x.shape[-1] + dev = x.device + dtype = x.dtype - len_token_seq = x.shape[-2] - pe = torch.zeros( len_token_seq, dim_embed, device=dev) - position = torch.arange( 0, len_token_seq).unsqueeze(1) - div = torch.exp(torch.arange( 0, dim_embed, 2) * -(math.log(10000) / dim_embed)) + len_token_seq = x.shape[-2] + pe = torch.zeros(len_token_seq, dim_embed, device=dev, dtype=dtype) + position = torch.arange(0, len_token_seq, device=dev, dtype=dtype).unsqueeze(1) + div = torch.exp( + torch.arange(0, dim_embed, 2, device=dev, dtype=dtype) * -(math.log(10000) / dim_embed) + ) - pe[:, 0::2] = torch.sin( position * div[ : pe[:, 0::2].shape[1] ]) - pe[:, 1::2] = torch.cos( position * div[ : pe[:, 1::2].shape[1] ]) - x = x + pe + pe[:, 0::2] = torch.sin(position * div[: pe[:, 0::2].shape[1]]) + pe[:, 1::2] = torch.cos(position * div[: pe[:, 1::2].shape[1]]) + x = x + pe - return x + return x -#################################################################################################### -def positional_encoding_harmonic_idx( x, s_idx) : - '''space time harmonic positional encoding''' - dim_embed = x.shape[-1] - dev = x.device +#################################################################################################### +def positional_encoding_harmonic_idx(x, s_idx): + """space time harmonic positional encoding""" - len_token_seq = x.shape[0] - pe = torch.zeros( x.shape[-2:], device=dev) - pos = (s_idx+1) * torch.ones( len_token_seq, device=dev) - xs = (2. * np.pi * torch.arange( 0, dim_embed, 2, device=dev) / dim_embed) + dim_embed = x.shape[-1] + dev = x.device - pe[:,0::2] = torch.sin( torch.outer( pos, xs)) - pe[:,1::2] = torch.cos( torch.outer( pos, xs)) - x = x + pe + len_token_seq = x.shape[0] + pe = torch.zeros(x.shape[-2:], device=dev) + pos = (s_idx + 1) * torch.ones(len_token_seq, device=dev) + xs = 2.0 * np.pi * torch.arange(0, dim_embed, 2, device=dev) / dim_embed - return x + pe[:, 0::2] = torch.sin(torch.outer(pos, xs)) + pe[:, 1::2] = torch.cos(torch.outer(pos, xs)) + x = x + pe -#################################################################################################### -def positional_encoding_harmonic_global( x) : - '''space time harmonic positional encoding''' + return x - dim_embed = x.shape[-1] - dev = x.device - pe = torch.zeros( x.shape[-3], x.shape[-2], dim_embed, device=dev) - xs = (2. 
* np.pi * torch.arange( 0, dim_embed, 2, device=dev) / dim_embed) - pe[ ..., 0::2] = 0.5 * torch.sin( torch.outer( 8 * torch.arange( x.shape[-2], device=dev), xs) ) - pe[ ..., 0::2] += torch.sin( torch.outer( torch.arange( x.shape[-3], device=dev), xs) ).unsqueeze(1).repeat( (1,x.shape[-2],1)) - pe[ ..., 1::2] = 0.5 * torch.cos( torch.outer( 8 * torch.arange( x.shape[-2], device=dev), xs) ) - pe[ ..., 1::2] += torch.cos( torch.outer( torch.arange( x.shape[-3], device=dev), xs) ).unsqueeze(1).repeat( (1,x.shape[-2],1)) - x = x + pe +#################################################################################################### +def positional_encoding_harmonic_global(x): + """space time harmonic positional encoding""" + + dim_embed = x.shape[-1] + dev = x.device + + pe = torch.zeros(x.shape[-3], x.shape[-2], dim_embed, device=dev) + xs = 2.0 * np.pi * torch.arange(0, dim_embed, 2, device=dev) / dim_embed + pe[..., 0::2] = 0.5 * torch.sin(torch.outer(8 * torch.arange(x.shape[-2], device=dev), xs)) + pe[..., 0::2] += ( + torch.sin(torch.outer(torch.arange(x.shape[-3], device=dev), xs)) + .unsqueeze(1) + .repeat((1, x.shape[-2], 1)) + ) + pe[..., 1::2] = 0.5 * torch.cos(torch.outer(8 * torch.arange(x.shape[-2], device=dev), xs)) + pe[..., 1::2] += ( + torch.cos(torch.outer(torch.arange(x.shape[-3], device=dev), xs)) + .unsqueeze(1) + .repeat((1, x.shape[-2], 1)) + ) + x = x + pe + + return x - return x #################################################################################################### -def positional_encoding_harmonic_coord( x, lats, lons) : - '''space time harmonic positional encoding''' +def positional_encoding_harmonic_coord(x, lats, lons): + """space time harmonic positional encoding""" - dim_embed = x.shape[-1] - dev = x.device + dim_embed = x.shape[-1] + dev = x.device - pe = torch.zeros( x.shape[0], dim_embed, device=dev) - xs = (2. * np.pi * torch.arange( 0, dim_embed, 2, device=dev) / dim_embed) - pe[ ..., 0::2] = 0.5 * torch.sin( torch.outer( lats, xs) ) - pe[ ..., 1::2] = 0.5 * torch.cos( torch.outer( lons, xs) )[ ... , : pe[ ..., 1::2].shape[-1] ] - x = x + pe + pe = torch.zeros(x.shape[0], dim_embed, device=dev) + xs = 2.0 * np.pi * torch.arange(0, dim_embed, 2, device=dev) / dim_embed + pe[..., 0::2] = 0.5 * torch.sin(torch.outer(lats, xs)) + pe[..., 1::2] = 0.5 * torch.cos(torch.outer(lons, xs))[..., : pe[..., 1::2].shape[-1]] + x = x + pe - return x \ No newline at end of file + return x diff --git a/src/weathergen/model/stream_embed_linear.py b/src/weathergen/model/stream_embed_linear.py deleted file mode 100644 index cc327729b..000000000 --- a/src/weathergen/model/stream_embed_linear.py +++ /dev/null @@ -1,27 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. 
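The harmonic encodings reflowed above are additive and shape-preserving; a small usage sketch for the sequence variant, assuming the weathergen package is importable (the shapes are illustrative):

import torch

from weathergen.model.positional_encoding import positional_encoding_harmonic

x = torch.zeros(10, 32)  # (len_token_seq, dim_embed)
y = positional_encoding_harmonic(x)
# the sin/cos terms are simply added; the shape is unchanged
assert y.shape == x.shape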
- -import torch -from torch.utils.checkpoint import checkpoint - -class StreamEmbedLinear( torch.nn.Module) : - - def __init__(self, dim_in, dim_out) : - '''Constructor''' - - super( StreamEmbedLinear, self).__init__() - - self.layer = torch.nn.Linear( dim_in, dim_out) - - def forward( self, x) : - - # x = checkpoint( self.layer, x.flatten( -2, -1), use_reentrant=True) - x = self.layer( x.flatten( -2, -1)) - - return x diff --git a/src/weathergen/model/stream_embed_transformer.py b/src/weathergen/model/stream_embed_transformer.py deleted file mode 100644 index adc407ac3..000000000 --- a/src/weathergen/model/stream_embed_transformer.py +++ /dev/null @@ -1,147 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -import math -import code - -import numpy as np -import torch -from torch.utils.checkpoint import checkpoint - -from weathergen.model.attention import MultiSelfAttentionHead -from weathergen.model.mlp import MLP -from weathergen.model.norms import RMSNorm -from weathergen.model.positional_encoding import positional_encoding_harmonic -from weathergen.model.positional_encoding import positional_encoding_harmonic_coord - -from weathergen.model.utils import get_num_parameters - -class StreamEmbedTransformer( torch.nn.Module) : - - def __init__(self, mode, num_tokens, token_size, num_channels, dim_embed, dim_out, - num_blocks, num_heads, norm_type = 'LayerNorm', embed_size_centroids=64, - unembed_mode = 'full') : - '''Constructor - - unembed_mode : { 'full' , 'block'} - full : monolithic (and correspondingly large) unembedding network that maps from - (num_tokens x dim_embed) to dim_out, allowing for mixing between channels/columns - block : per-channel/column unembedding network (which is hence a block-sparse form of full) - ''' - - super( StreamEmbedTransformer, self).__init__() - - self.num_tokens = num_tokens - self.num_channels = num_channels - self.dim_in = token_size if mode=='channels' else num_channels - self.dim_embed = dim_embed - self.dim_out = dim_out - self.num_blocks = num_blocks - self.num_heads = num_heads - self.embed_size_centroids = embed_size_centroids - self.unembed_mode = unembed_mode - - norm = torch.nn.LayerNorm if norm_type == 'LayerNorm' else RMSNorm - - self.embed = torch.nn.Linear( self.dim_in, self.dim_embed) - - self.layers = torch.nn.ModuleList() - for _ in range( self.num_blocks) : - self.layers.append( MultiSelfAttentionHead( self.dim_embed, self.num_heads, dropout_rate=0.1, - with_qk_lnorm=True, with_flash=True)) - self.layers.append( MLP( self.dim_embed, self.dim_embed, hidden_factor=2, dropout_rate=0.1, - with_residual=True)) - - if mode == 'channels' : - - if self.unembed_mode == 'full' : - self.ln_final = norm( num_channels*self.dim_embed) - self.unembed = torch.nn.Linear( num_channels*self.dim_embed, - self.num_tokens*self.dim_out - embed_size_centroids) - - elif self.unembed_mode == 'block' : - # modify embed_size_centroids to ensure no additional padding is needed - rem = (self.num_tokens*self.dim_out - embed_size_centroids) % num_channels - embed_size_centroids += rem - dim_out = (self.num_tokens*self.dim_out - embed_size_centroids) // num_channels - Linear = torch.nn.Linear - 
self.unembed = torch.nn.ModuleList([Linear(dim_embed,dim_out) for _ in range(num_channels)]) - self.ln_final = torch.nn.ModuleList( [norm( dim_embed) for _ in range(num_channels)]) - - else : - assert False - - self.forward = self.forward_channels - - elif mode == 'columns' : - assert self.unembed_mode == 'block' # only supported mode at the moment - # padding needed if the unembedded columns cannot be concatenated to dim_out (e.g GPSRO) - self.pad = (self.dim_out-embed_size_centroids) % token_size - self.out_pad = torch.nn.Parameter( torch.zeros( self.pad)) - self.unembed = torch.nn.Linear( self.dim_embed, - self.num_tokens * ((self.dim_out-embed_size_centroids)//token_size)) - self.ln_final = norm( dim_out) - self.forward = self.forward_columns - - else : - assert False - - self.dropout_final = torch.nn.Dropout( 0.1) - self.embed_centroids = torch.nn.Linear( 5, embed_size_centroids) - - def forward_channels( self, x_in, centroids) : - - peh = positional_encoding_harmonic - - # embed provided input data - x = peh( checkpoint( self.embed, x_in.transpose( -2, -1), use_reentrant=False)) - - for layer in self.layers : - x = checkpoint( layer, x, use_reentrant=False) - - # read out - if self.unembed_mode == 'full' : - out = checkpoint( self.unembed, self.ln_final( x.flatten( -2,-1)), use_reentrant=False) - elif self.unembed_mode == 'block' : - out = [checkpoint( ue, ln(x[:,i]), use_reentrant=False) - for i,(ue,ln) in enumerate(zip(self.unembed,self.ln_final))] - out = torch.stack( out, dim=1).flatten( -2, -1) - else : - assert False - - # append centroids - if self.embed_size_centroids > 0 : - out = torch.cat([ out, self.embed_centroids(centroids)], -1) - # final reshape - out = self.dropout_final( out.reshape(-1,self.num_tokens,self.dim_out)) - - return out - - # @torch.compile( dynamic=True) - def forward_columns( self, x_in, centroids) : - - # embed provided input data - x = positional_encoding_harmonic( checkpoint( self.embed, x_in, use_reentrant=False)) - - for layer in self.layers : - x = checkpoint( layer, x, use_reentrant=False) - - # append centroids - # unembed and reshape - out = checkpoint( self.unembed, x, use_reentrant=False) - out = out.flatten(-2,-1).reshape(x.shape[0],self.num_tokens,-1) - # TODO: unsqueeze will not work with num_tokens > 1 - out = torch.cat( [out, self.embed_centroids(centroids).unsqueeze(1)], -1) - # pad to uniform dim_out (that has to be uniform across streams) - if self.pad > 0 : - out = torch.cat( (out, self.out_pad.repeat( (x.shape[0],self.num_tokens,1))), -1) - # also encode centroids with overlayed positional encoding - out = self.dropout_final( self.ln_final( out)) - - return out diff --git a/src/weathergen/model/utils.py b/src/weathergen/model/utils.py index ca149eebe..a13b0e49c 100644 --- a/src/weathergen/model/utils.py +++ b/src/weathergen/model/utils.py @@ -1,4 +1,4 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -8,13 +8,45 @@ # nor does it submit to any jurisdiction. 
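####################################################################################################
# The StreamEmbedTransformer deleted above distinguished two unembedding modes: 'full', a single
# monolithic Linear over all channels, and 'block', one small Linear per channel, i.e. a
# block-sparse factorization of 'full'. A minimal sketch of the two variants (the dimensions are
# illustrative, not taken from a stream config):

import torch

num_channels, dim_embed, dim_out_per_ch = 8, 32, 16

# 'full': allows mixing between channels
unembed_full = torch.nn.Linear(num_channels * dim_embed, num_channels * dim_out_per_ch)

# 'block': per-channel maps, equivalent to 'full' with a block-diagonal weight matrix
unembed_block = torch.nn.ModuleList(
    [torch.nn.Linear(dim_embed, dim_out_per_ch) for _ in range(num_channels)]
)

x = torch.randn(4, num_channels, dim_embed)  # a batch of 4 embedded tokens
out_full = unembed_full(x.flatten(-2, -1))
out_block = torch.stack([ue(x[:, i]) for i, ue in enumerate(unembed_block)], 1).flatten(-2, -1)
assert out_full.shape == out_block.shape == (4, num_channels * dim_out_per_ch)
####################################################################################################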
import torch +import torch.nn as nn + + +######################################### +def get_num_parameters(block): + nps = filter(lambda p: p.requires_grad, block.parameters()) + return sum([torch.prod(torch.tensor(p.size())) for p in nps]) + ######################################### -def get_num_parameters( block) : - nps = filter(lambda p: p.requires_grad, block.parameters()) - return sum([torch.prod(torch.tensor(p.size())) for p in nps]) +def freeze_weights(block): + for p in block.parameters(): + p.requires_grad = False + ######################################### -def freeze_weights( block) : - for p in block.parameters() : - p.requires_grad = False +class ActivationFactory: + _registry = { + "identity": nn.Identity, + "tanh": nn.Tanh, + "softmax": nn.Softmax, + "sigmoid": nn.Sigmoid, + "gelu": nn.GELU, + "relu": nn.ReLU, + "leakyrelu": nn.LeakyReLU, + "elu": nn.ELU, + "selu": nn.SELU, + "prelu": nn.PReLU, + "softplus": nn.Softplus, + "linear": nn.Linear, + "logsoftmax": nn.LogSoftmax, + "silu": nn.SiLU, + "swish": nn.SiLU, + } + + @classmethod + def get(cls, name: str, **kwargs): + name = name.lower() + if name not in cls._registry: + raise ValueError(f"Unsupported activation type: '{name}'") + fn = cls._registry[name] + return fn(**kwargs) if callable(fn) else fn diff --git a/src/weathergen/run_train.py b/src/weathergen/run_train.py new file mode 100644 index 000000000..fde2d3a66 --- /dev/null +++ b/src/weathergen/run_train.py @@ -0,0 +1,203 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +""" +The entry point for training and inference weathergen-atmo +""" + +import logging +import pdb +import sys +import time +import traceback +from pathlib import Path + +import weathergen.common.config as config +import weathergen.utils.cli as cli +from weathergen.train.trainer import Trainer +from weathergen.utils.logger import init_loggers + +logger = logging.getLogger(__name__) + + +def inference(): + # By default, arguments from the command line are read. + inference_from_args(sys.argv[1:]) + + +def inference_from_args(argl: list[str]): + """ + Inference function for WeatherGenerator model. + Entry point for calling the inference code from the command line. + + When running integration tests, the arguments are directly provided. 
+ """ + parser = cli.get_inference_parser() + args = parser.parse_args(argl) + + inference_overwrite = dict( + shuffle=False, + start_date_val=args.start_date, + end_date_val=args.end_date, + samples_per_validation=args.samples, + log_validation=args.samples if args.save_samples else 0, + streams_output=args.streams_output, + ) + + cli_overwrite = config.from_cli_arglist(args.options) + cf = config.load_config( + args.private_config, + args.from_run_id, + args.mini_epoch, + *args.config, + inference_overwrite, + cli_overwrite, + ) + cf = config.set_run_id(cf, args.run_id, args.reuse_run_id) + + devices = Trainer.init_torch() + cf = Trainer.init_ddp(cf) + + init_loggers(cf.run_id) + + logger.info(f"DDP initialization: rank={cf.rank}, world_size={cf.world_size}") + + cf.run_history += [(args.from_run_id, cf.istep)] + + trainer = Trainer(cf.train_log_freq) + trainer.inference(cf, devices, args.from_run_id, args.mini_epoch) + + +#################################################################################################### +def train_continue() -> None: + """ + Function to continue training for WeatherGenerator model. + Entry point for calling train_continue from the command line. + Configurations are set in the function body. + + Args: + from_run_id (str): Run/model id of pretrained WeatherGenerator model to + continue training. Defaults to None. + Note: All model configurations are set in the function body. + """ + train_continue_from_args(sys.argv[1:]) + + +def train_continue_from_args(argl: list[str]): + parser = cli.get_continue_parser() + args = parser.parse_args(argl) + + if args.finetune_forecast: + finetune_overwrite = dict( + training_mode="forecast", + forecast_delta_hrs=0, # 12 + forecast_steps=1, # [j for j in range(1,9) for i in range(4)] + forecast_policy="fixed", # 'sequential_random' # 'fixed' #'sequential' #_random' + forecast_att_dense_rate=1.0, # 0.25 + fe_num_blocks=8, + fe_num_heads=16, + fe_dropout_rate=0.1, + fe_with_qk_lnorm=True, + lr_start=0.000001, + lr_max=0.00003, + lr_final_decay=0.00003, + lr_final=0.0, + lr_steps_warmup=1024, + lr_steps_cooldown=4096, + lr_policy_warmup="cosine", + lr_policy_decay="linear", + lr_policy_cooldown="linear", + num_mini_epochs=12, # len(cf.forecast_steps) + 4 + istep=0, + ) + else: + finetune_overwrite = dict() + + cli_overwrite = config.from_cli_arglist(args.options) + cf = config.load_config( + args.private_config, + args.from_run_id, + args.mini_epoch, + finetune_overwrite, + *args.config, + cli_overwrite, + ) + cf = config.set_run_id(cf, args.run_id, args.reuse_run_id) + + devices = Trainer.init_torch() + cf = Trainer.init_ddp(cf) + + init_loggers(cf.run_id) + + # track history of run to ensure traceability of results + cf.run_history += [(args.from_run_id, cf.istep)] + + trainer = Trainer(cf.train_log_freq) + trainer.run(cf, devices, args.from_run_id, args.mini_epoch) + + +#################################################################################################### +def train() -> None: + """ + Training function for WeatherGenerator model. + Entry point for calling the training code from the command line. + Configurations are set in the function body. + + Args: + run_id (str, optional): Run/model id of pretrained WeatherGenerator model to + continue training. Defaults to None. + Note: All model configurations are set in the function body. 
+ """ + train_with_args(sys.argv[1:], None) + + +def train_with_args(argl: list[str], stream_dir: str | None): + """ + Training function for WeatherGenerator model.""" + parser = cli.get_train_parser() + args = parser.parse_args(argl) + + cli_overwrite = config.from_cli_arglist(args.options) + + cf = config.load_config(args.private_config, None, None, *args.config, cli_overwrite) + cf = config.set_run_id(cf, args.run_id, False) + + cf.data_loader_rng_seed = int(time.time()) + devices = Trainer.init_torch() + cf = Trainer.init_ddp(cf) + + # if cf.rank == 0: + # this line should probably come after the processes have been sorted out else we get lots + # of duplication due to multiple process in the multiGPU case + init_loggers(cf.run_id) + + logger.info(f"DDP initialization: rank={cf.rank}, world_size={cf.world_size}") + + cf.streams = config.load_streams(Path(cf.streams_directory)) + + if cf.with_flash_attention: + assert cf.with_mixed_precision + + trainer = Trainer(cf.train_log_freq) + + try: + trainer.run(cf, devices) + except Exception: + extype, value, tb = sys.exc_info() + traceback.print_exc() + pdb.post_mortem(tb) + + +if __name__ == "__main__": + # Entry point for slurm script. + # Check whether --from_run_id passed as argument. + if next((True for arg in sys.argv if "--from_run_id" in arg), False): + train_continue() + else: + train() diff --git a/src/weathergen/train/loss.py b/src/weathergen/train/loss.py index 33bb83bbb..406cd051c 100644 --- a/src/weathergen/train/loss.py +++ b/src/weathergen/train/loss.py @@ -1,4 +1,4 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -7,77 +7,191 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. -import code import numpy as np import torch +stat_loss_fcts = ["stats", "kernel_crps"] # Names of loss functions that need std computed -#################################################################################################### -def Gaussian( x, mu=0., std_dev=1.) : - # unnormalized Gaussian where maximum is one - return torch.exp( -0.5 * (x-mu)*(x-mu) / (std_dev*std_dev)) - -#################################################################################################### -def normalized_Gaussian( x, mu=0., std_dev=1.) : - return (1 / (std_dev*np.sqrt(2.*np.pi))) * torch.exp( -0.5 * (x-mu)*(x-mu) / (std_dev*std_dev)) - -def erf( x, mu=0., std_dev=1.) : - c1 = torch.sqrt( torch.tensor(0.5 * np.pi) ) - c2 = torch.sqrt( 1. / torch.tensor(std_dev * std_dev)) - c3 = torch.sqrt( torch.tensor( 2.) ) - val = c1 * ( 1./c2 - std_dev * torch.special.erf( (mu - x) / (c3 * std_dev) ) ) - return val - -#################################################################################################### -def gaussian_crps( target, ens, mu, stddev) : - # see Eq. A2 in S. Rasp and S. Lerch. Neural networks for postprocessing ensemble weather - # forecasts. Monthly Weather Review, 146(11):3885 – 3900, 2018. - c1 = np.sqrt(1./np.pi) - t1 = 2. * erf( (target-mu) / stddev) - 1. - t2 = 2. 
* normalized_Gaussian( (target-mu) / stddev) - val = stddev * ( (target-mu)/stddev * t1 + t2 - c1 ) - return torch.mean(val) # + torch.mean( torch.sqrt( stddev) ) - -#################################################################################################### -def stats( target, ens, mu, stddev) : - diff = Gaussian( target, mu, stddev) - 1. - return torch.mean( diff * diff) + torch.mean( torch.sqrt( stddev) ) - -#################################################################################################### -def stats_normalized( target, ens, mu, stddev) : - a = normalized_Gaussian( target, mu, stddev) - max = 1 / (np.sqrt(2 * np.pi) * stddev) - d = a - max - return torch.mean( d*d) + torch.mean( torch.sqrt( stddev) ) - -#################################################################################################### -def stats_normalized_erf( target, ens, mu, stddev) : - delta = -torch.abs(target - mu) - d = 0.5 + torch.special.erf(delta / (np.sqrt(2.) * stddev)) - return torch.mean( d*d) #+ torch.mean( torch.sqrt( stddev) ) - -#################################################################################################### -def mse( target, ens, mu, *kwargs) : - return torch.nn.functional.mse_loss( target, mu) - -#################################################################################################### -def mse_ens( target, ens, mu, stddev) : - mse_loss = torch.nn.functional.mse_loss - return torch.stack( [mse_loss( target, mem) for mem in ens], 0).mean() - -#################################################################################################### -def kernel_crps( target, ens, mu, stddev, fair = True) : - - ens_size = ens.shape[0] - mae = torch.stack( [(target - mem).abs().mean() for mem in ens], 0).mean() - - if ens_size == 1: - return mae - - coef = -1.0 / (2.0 * ens_size * (ens_size - 1)) if fair else -1.0 / (2.0 * ens_size**2) - ens_var = coef * torch.tensor( [(p1 - p2).abs().sum() for p1 in ens for p2 in ens]).sum() - ens_var /= ens.shape[1] - - return mae + ens_var +def gaussian(x, mu=0.0, std_dev=1.0): + # unnormalized Gaussian where maximum is one + return torch.exp(-0.5 * (x - mu) * (x - mu) / (std_dev * std_dev)) + + +def normalized_gaussian(x, mu=0.0, std_dev=1.0): + return (1 / (std_dev * np.sqrt(2.0 * np.pi))) * torch.exp( + -0.5 * (x - mu) * (x - mu) / (std_dev * std_dev) + ) + + +def erf(x, mu=0.0, std_dev=1.0): + c1 = torch.sqrt(torch.tensor(0.5 * np.pi)) + c2 = torch.sqrt(1.0 / torch.tensor(std_dev * std_dev)) + c3 = torch.sqrt(torch.tensor(2.0)) + val = c1 * (1.0 / c2 - std_dev * torch.special.erf((mu - x) / (c3 * std_dev))) + return val + + +def gaussian_crps(target, ens, mu, stddev): + # see Eq. A2 in S. Rasp and S. Lerch. Neural networks for postprocessing ensemble weather + # forecasts. Monthly Weather Review, 146(11):3885 – 3900, 2018. 
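+    # For reference, the closed form evaluated below (Eq. A2 of the reference above) is
+    #   CRPS( N(mu, stddev^2), y ) = stddev * ( z * (2 * Phi(z) - 1) + 2 * phi(z) - 1 / sqrt(pi) )
+    # with z = (y - mu) / stddev and Phi, phi the CDF and PDF of the standard normal;
+    # t1 and t2 below correspond to the CDF and PDF terms.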
+    c1 = np.sqrt(1.0 / np.pi)
+    t1 = 2.0 * erf((target - mu) / stddev) - 1.0
+    t2 = 2.0 * normalized_gaussian((target - mu) / stddev)
+    val = stddev * ((target - mu) / stddev * t1 + t2 - c1)
+    return torch.mean(val)  # + torch.mean( torch.sqrt( stddev) )
+
+
+def stats(target, ens, mu, stddev):
+    diff = gaussian(target, mu, stddev) - 1.0
+    return torch.mean(diff * diff) + torch.mean(torch.sqrt(stddev))
+
+
+def stats_normalized(target, ens, mu, stddev):
+    a = normalized_gaussian(target, mu, stddev)
+    max_val = 1 / (np.sqrt(2 * np.pi) * stddev)
+    d = a - max_val
+    return torch.mean(d * d) + torch.mean(torch.sqrt(stddev))
+
+
+def stats_normalized_erf(target, ens, mu, stddev):
+    delta = -torch.abs(target - mu)
+    d = 0.5 + torch.special.erf(delta / (np.sqrt(2.0) * stddev))
+    return torch.mean(d * d)  # + torch.mean( torch.sqrt( stddev) )
+
+
+def mse(target, ens, mu, *args):
+    return torch.nn.functional.mse_loss(target, mu)
+
+
+def mse_ens(target, ens, mu, stddev):
+    mse_loss = torch.nn.functional.mse_loss
+    return torch.stack([mse_loss(target, mem) for mem in ens], 0).mean()
+
+
+def kernel_crps(
+    targets,
+    preds,
+    weights_channels: torch.Tensor | None,
+    weights_points: torch.Tensor | None,
+    fair=True,
+):
+    """
+    Compute kernel CRPS
+
+    Params:
+      targets : shape ( num_data_points , num_channels )
+      preds : shape ( ens_dim , num_data_points , num_channels )
+      weights_channels : shape = (num_channels,)
+      weights_points : shape = (num_data_points,)
+
+    Returns:
+      loss: scalar - overall weighted CRPS
+      loss_chs: [C] - per-channel CRPS (location-weighted, not channel-weighted)
+    """
+
+    ens_size = preds.shape[0]
+    assert ens_size > 1, "Ensemble size has to be greater than 1 for kernel CRPS."
+    assert len(preds.shape) == 3, "if data has batch dimension, remove unsqueeze() below"
+
+    # replace NaN by 0
+    mask_nan = ~torch.isnan(targets)
+    targets = torch.where(mask_nan, targets, 0)
+    preds = torch.where(mask_nan, preds, 0)
+
+    # permute to enable/simplify broadcasting and contractions below
+    preds = preds.permute([2, 1, 0]).unsqueeze(0).to(torch.float32)
+    targets = targets.permute([1, 0]).unsqueeze(0).to(torch.float32)
+
+    mae = torch.mean(torch.abs(targets[..., None] - preds), dim=-1)
+
+    ens_n = -1.0 / (ens_size * (ens_size - 1)) if fair else -1.0 / (ens_size**2)
+    abs = torch.abs
+    ens_var = torch.zeros(size=preds.shape[:-1], device=preds.device)
+    # loop to reduce memory usage
+    for i in range(ens_size):
+        ens_var += torch.sum(ens_n * abs(preds[..., i].unsqueeze(-1) - preds[..., i + 1 :]), dim=-1)
+
+    kcrps_locs_chs = mae + ens_var
+
+    # apply point weighting
+    if weights_points is not None:
+        kcrps_locs_chs = kcrps_locs_chs * weights_points
+    # apply channel weighting
+    kcrps_chs = torch.mean(torch.mean(kcrps_locs_chs, 0), -1)
+    if weights_channels is not None:
+        kcrps_chs = kcrps_chs * weights_channels
+
+    return torch.mean(kcrps_chs), kcrps_chs
+
+
+def mse_channel_location_weighted(
+    target: torch.Tensor,
+    pred: torch.Tensor,
+    weights_channels: torch.Tensor | None,
+    weights_points: torch.Tensor | None,
+):
+    """
+    Compute weighted MSE loss for one window or step
+
+    The function implements:
+
+    loss = Mean_{channels}( weights_channels * Mean_{data_pts}( (target - pred)^2 * weights_points ))
+
+    Geometrically,
+
+         ------------------------       --
+        |                        |     |  |
+        |                        |     |  |
+        |                        |     |  |
+        |     target - pred      |  x  |wp|
+        |                        |     |  |
+        |                        |     |  |
+        |                        |     |  |
+         ------------------------       --
+                    x
+         ------------------------
+        |           wc           |
+         ------------------------
+
+    where wp = weights_points and wc = weights_channels and "x" denotes row/col-wise
+    multiplication.
+
+    The computations are:
+    1. weight the rows of (target - pred) by wp = weights_points
+    2. take the mean over the row
+    3. weight the collapsed cols by wc = weights_channels
+    4. take the mean over the channel-weighted cols
+
+    Params:
+      target : shape ( num_data_points , num_channels )
+      pred : shape ( ens_dim , num_data_points , num_channels )
+      weights_channels : shape = (num_channels,)
+      weights_points : shape = (num_data_points,)
+
+    Return:
+      loss : weighted loss for gradient computation
+      loss_chs : losses per channel with location weighting but no channel weighting
+    """
+
+    mask_nan = ~torch.isnan(target)
+    # a single-member ensemble is used directly; larger ensembles are reduced by their mean
+    pred = pred[0] if pred.shape[0] == 1 else pred.mean(0)
+
+    diff2 = torch.square(torch.where(mask_nan, target, 0) - torch.where(mask_nan, pred, 0))
+    if weights_points is not None:
+        diff2 = (diff2.transpose(1, 0) * weights_points).transpose(1, 0)
+    loss_chs = diff2.mean(0)
+    loss = torch.mean(loss_chs * weights_channels if weights_channels is not None else loss_chs)
+
+    return loss, loss_chs
+
+
+def cosine_latitude(stream_data, forecast_offset, fstep, min_value=1e-3, max_value=1.0):
+    latitudes_radian = stream_data.target_coords_raw[forecast_offset + fstep][:, 0] * np.pi / 180
+    return (max_value - min_value) * np.cos(latitudes_radian) + min_value
+
+
+def gamma_decay(forecast_steps, gamma):
+    fsteps = np.arange(forecast_steps)
+    weights = gamma**fsteps
+    return weights * (len(fsteps) / np.sum(weights))
diff --git a/src/weathergen/train/loss_calculator.py b/src/weathergen/train/loss_calculator.py
new file mode 100644
index 000000000..f457d6454
--- /dev/null
+++ b/src/weathergen/train/loss_calculator.py
@@ -0,0 +1,320 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+import dataclasses
+import logging
+
+import numpy as np
+import torch
+from omegaconf import DictConfig
+from torch import Tensor
+
+import weathergen.train.loss as losses
+from weathergen.train.loss import stat_loss_fcts
+from weathergen.utils.train_logger import TRAIN, VAL, Stage
+
+_logger = logging.getLogger(__name__)
+
+
+@dataclasses.dataclass
+class LossValues:
+    """
+    A dataclass to encapsulate the various loss components computed by the LossCalculator.
+
+    This provides a structured way to return the primary loss used for optimization,
+    along with detailed per-stream/per-channel/per-loss-function losses for logging,
+    and standard deviations for ensemble scenarios.
+    """
+
+    # The primary scalar loss value for optimization.
+    loss: Tensor
+    # Dictionaries containing detailed loss values for each stream, channel, and loss function, as
+    # well as standard deviations when operating with ensembles (e.g., when training with CRPS).
+    losses_all: dict[str, Tensor]
+    stddev_all: dict[str, Tensor]
+
+
+class LossCalculator:
+    """
+    Manages and computes the overall loss for a WeatherGenerator model during
+    training and validation stages.
+
+    This class handles the initialization and application of various loss functions,
+    applies channel-specific weights, constructs masks for missing data, and
+    aggregates losses across different data streams, channels, and forecast steps.
+ It provides both the main loss for backpropagation and detailed loss metrics for logging. + """ + + def __init__( + self, + cf: DictConfig, + stage: Stage, + device: str, + ): + """ + Initializes the LossCalculator. + + This sets up the configuration, the operational stage (training or validation), + the device for tensor operations, and initializes the list of loss functions + based on the provided configuration. + + Args: + cf: The OmegaConf DictConfig object containing model and training configurations. + It should specify 'loss_fcts' for training and 'loss_fcts_val' for validation. + stage: The current operational stage, either TRAIN or VAL. + This dictates which set of loss functions (training or validation) will be used. + device: The computation device, such as 'cpu' or 'cuda:0', where tensors will reside. + """ + self.cf = cf + self.stage = stage + self.device = device + + # Dynamically load loss functions based on configuration and stage + loss_fcts = cf.loss_fcts if stage == TRAIN else cf.loss_fcts_val + self.loss_fcts = [ + [getattr(losses, name if name != "mse" else "mse_channel_location_weighted"), w] + for name, w in loss_fcts + ] + + def _get_weights(self, stream_info): + """ + Get weights for current stream + """ + + device = self.device + + # Determine stream and channel loss weights based on the current stage + if self.stage == TRAIN: + # set loss_weights to 1. when not specified + stream_info_loss_weight = stream_info.get("loss_weight", 1.0) + weights_channels = ( + torch.tensor(stream_info["target_channel_weights"]).to( + device=device, non_blocking=True + ) + if "target_channel_weights" in stream_info + else None + ) + elif self.stage == VAL: + # in validation mode, always unweighted loss + stream_info_loss_weight = 1.0 + weights_channels = None + + return stream_info_loss_weight, weights_channels + + def _get_fstep_weights(self, forecast_steps): + timestep_weight_config = self.cf.get("timestep_weight") + if timestep_weight_config is None: + return [1.0 for _ in range(forecast_steps)] + weights_timestep_fct = getattr(losses, timestep_weight_config[0]) + return weights_timestep_fct(forecast_steps, timestep_weight_config[1]) + + def _get_location_weights(self, stream_info, stream_data, forecast_offset, fstep): + location_weight_type = stream_info.get("location_weight", None) + if location_weight_type is None: + return None + weights_locations_fct = getattr(losses, location_weight_type) + weights_locations = weights_locations_fct(stream_data, forecast_offset, fstep) + weights_locations = weights_locations.to(device=self.device, non_blocking=True) + + return weights_locations + + def _get_substep_masks(self, stream_info, fstep, stream_data): + """ + Find substeps and create corresponding masks (reused across loss functions) + """ + + tok_spacetime = stream_info.get("tokenize_spacetime", None) + target_times = stream_data.target_times_raw[self.cf.forecast_offset + fstep] + target_times_unique = np.unique(target_times) if tok_spacetime else [target_times] + substep_masks = [] + for t in target_times_unique: + # find substep + mask_t = torch.tensor(t == target_times).to(self.device, non_blocking=True) + substep_masks.append(mask_t) + + return substep_masks + + @staticmethod + def _loss_per_loss_function( + loss_fct, + stream_info, + target: torch.Tensor, + pred: torch.Tensor, + substep_masks: list[torch.Tensor], + weights_channels: torch.Tensor, + weights_locations: torch.Tensor, + ): + """ + Compute loss for given loss function + """ + + loss_lfct = torch.tensor(0.0, 
device=target.device, requires_grad=True)
+        losses_chs = torch.zeros(target.shape[-1], device=target.device, dtype=torch.float32)
+
+        ctr_substeps = 0
+        for mask_t in substep_masks:
+            assert mask_t.sum() == len(weights_locations) if weights_locations is not None else True
+
+            loss, loss_chs = loss_fct(
+                target[mask_t], pred[:, mask_t], weights_channels, weights_locations
+            )
+
+            # accumulate loss
+            loss_lfct = loss_lfct + loss
+            losses_chs = losses_chs + loss_chs.detach() if len(loss_chs) > 0 else losses_chs
+            ctr_substeps += 1 if loss > 0.0 else 0
+
+        # normalize over substeps in window
+        losses_chs /= ctr_substeps if ctr_substeps > 0 else 1.0
+
+        # TODO: substep weight
+        loss_lfct = loss_lfct / (ctr_substeps if ctr_substeps > 0 else 1.0)
+
+        return loss_lfct, losses_chs
+
+    def compute_loss(
+        self,
+        preds: list[list[Tensor]],
+        streams_data: list[list[any]],
+    ) -> LossValues:
+        """
+        Computes the total loss for a given batch of predictions and corresponding
+        stream data.
+
+        The computed loss is:
+
+        Mean_{stream}( Mean_{fsteps}( Mean_{loss_fcts}( loss_fct( target, pred, weights) )))
+
+        This method orchestrates the calculation of the overall loss by iterating through
+        different data streams, forecast steps, channels, and configured loss functions.
+        It applies weighting, handles NaN values through masking, and accumulates
+        detailed loss metrics for logging.
+
+        Args:
+            preds: A nested list of prediction tensors. The outer list represents forecast steps,
+                the inner list represents streams. Each tensor contains predictions for that
+                step and stream.
+            streams_data: A nested list representing the input batch data. The outer list is for
+                batch items, the inner list for streams. Each element provides an object
+                (e.g., dataclass instance) containing target data and metadata.
+
+        Returns:
+            A LossValues dataclass instance containing:
+                - loss: The loss for back-propagation.
+                - losses_all: A dictionary mapping stream names to a tensor of per-channel and
+                    per-loss-function losses, normalized by non-empty targets/forecast steps.
+                - stddev_all: A dictionary mapping stream names to a tensor of mean standard
+                    deviations of predictions for channels with statistical loss functions,
+                    normalized.
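+
+        In pseudo-code, the aggregation implemented below is roughly (names as in the body,
+        normalization counters and spoof handling omitted for brevity):
+
+            for stream in streams:                        # mean over streams
+                for fstep, target in enumerate(targets):  # mean over forecast steps
+                    for loss_fct, w in self.loss_fcts:    # mean over loss functions
+                        loss += w * loss_fct(target, pred, ...) * stream_weight * fstep_weight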
+ """ + + # gradient loss + loss = torch.tensor(0.0, device=self.device, requires_grad=True) + # counter for non-empty targets + ctr_streams = 0 + + # initialize dictionaries for detailed loss tracking and standard deviation statistics + # create tensor for each stream + losses_all: dict[str, Tensor] = { + st.name: torch.zeros( + (len(st[str(self.stage) + "_target_channels"]), len(self.loss_fcts)), + device=self.device, + ) + for st in self.cf.streams + } + stddev_all: dict[str, Tensor] = { + st.name: torch.zeros(len(stat_loss_fcts), device=self.device) for st in self.cf.streams + } + + # TODO: iterate over batch dimension + i_batch = 0 + for i_stream_info, stream_info in enumerate(self.cf.streams): + # extract target tokens for current stream from the specified forecast offset onwards + targets = streams_data[i_batch][i_stream_info].target_tokens[self.cf.forecast_offset :] + + stream_data = streams_data[i_batch][i_stream_info] + + fstep_loss_weights = self._get_fstep_weights(len(targets)) + + loss_fsteps = torch.tensor(0.0, device=self.device, requires_grad=True) + ctr_fsteps = 0 + + stream_is_spoof = streams_data[i_batch][i_stream_info].is_spoof() + if stream_is_spoof: + spoof_weight = torch.tensor(0.0, device=self.device, requires_grad=False) + else: + spoof_weight = torch.tensor(1.0, device=self.device, requires_grad=False) + + for fstep, (target, fstep_weight) in enumerate( + zip(targets, fstep_loss_weights, strict=False) + ): + # skip if either target or prediction has no data points + pred = preds[fstep][i_stream_info] + if not (target.shape[0] > 0 and pred.shape[0] > 0): + continue + + # reshape prediction tensor to match target's dimensions: extract data/coords and + # remove token dimension if it exists. + # expected final shape of pred is [ensemble_size, num_samples, num_channels]. 
+                pred = pred.reshape([pred.shape[0], *target.shape])
+                assert pred.shape[1] > 0
+
+                # get weights for the current stream
+                stream_loss_weight, weights_channels = self._get_weights(stream_info)
+
+                # get weights for locations
+                weights_locations = self._get_location_weights(
+                    stream_info, stream_data, self.cf.forecast_offset, fstep
+                )
+
+                # get masks for sub-time steps
+                substep_masks = self._get_substep_masks(stream_info, fstep, stream_data)
+
+                # accumulate loss from different loss functions
+                loss_fstep = torch.tensor(0.0, device=self.device, requires_grad=True)
+                ctr_loss_fcts = 0
+                for i_lfct, (loss_fct, loss_fct_weight) in enumerate(self.loss_fcts):
+                    # loss for current loss function
+                    loss_lfct, loss_lfct_chs = LossCalculator._loss_per_loss_function(
+                        loss_fct,
+                        stream_info,
+                        target,
+                        pred,
+                        substep_masks,
+                        weights_channels,
+                        weights_locations,
+                    )
+                    losses_all[stream_info.name][:, i_lfct] += spoof_weight * loss_lfct_chs
+
+                    # Add the weighted and normalized loss from this loss function to the total
+                    # batch loss
+                    loss_fstep = loss_fstep + (
+                        loss_fct_weight * loss_lfct * stream_loss_weight * fstep_weight
+                    )
+                    ctr_loss_fcts += 1 if loss_lfct > 0.0 else 0
+
+                loss_fsteps = loss_fsteps + (loss_fstep / ctr_loss_fcts if ctr_loss_fcts > 0 else 0)
+                ctr_fsteps += 1 if ctr_loss_fcts > 0 else 0
+
+            loss = loss + ((spoof_weight * loss_fsteps) / (ctr_fsteps if ctr_fsteps > 0 else 1.0))
+            ctr_streams += 1 if ctr_fsteps > 0 and not stream_is_spoof else 0
+
+            # normalize by the number of non-empty forecast steps
+            losses_all[stream_info.name] /= ctr_fsteps if ctr_fsteps > 0 else 1.0
+            stddev_all[stream_info.name] /= ctr_fsteps if ctr_fsteps > 0 else 1.0
+
+            # replace channels without information by nan to exclude from further computations
+            losses_all[stream_info.name][losses_all[stream_info.name] == 0.0] = torch.nan
+            stddev_all[stream_info.name][stddev_all[stream_info.name] == 0.0] = torch.nan
+
+        # normalize by all targets and forecast steps that were non-empty
+        # (with each having an expected loss of 1 for an uninitialized neural net)
+        loss = loss / ctr_streams
+
+        # Return all computed loss components encapsulated in a LossValues dataclass
+        return LossValues(loss=loss, losses_all=losses_all, stddev_all=stddev_all)
diff --git a/src/weathergen/train/lr_scheduler.py b/src/weathergen/train/lr_scheduler.py
index 6355afa06..f6ba7ab5d 100644
--- a/src/weathergen/train/lr_scheduler.py
+++ b/src/weathergen/train/lr_scheduler.py
@@ -1,4 +1,4 @@
-# (C) Copyright 2024 WeatherGenerator contributors.
+# (C) Copyright 2025 WeatherGenerator contributors.
 #
 # This software is licensed under the terms of the Apache Licence Version 2.0
 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
@@ -7,233 +7,308 @@
 # granted to it by virtue of its status as an intergovernmental organisation
 # nor does it submit to any jurisdiction.
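####################################################################################################
# The hunk below replaces the LearningRateScheduler with a reformatted version and adds a
# 'constant' decay policy. The schedule itself has three phases, warmup -> decay -> cooldown,
# after which the final learning rate is kept. A minimal sketch of the phase logic, assuming
# piecewise-linear policies throughout (the class composes torch.optim.lr_scheduler instances
# instead):

def three_phase_lr(
    step, lr_start, lr_max, lr_final_decay, lr_final, n_warmup, n_decay, n_cooldown
):
    if step < n_warmup:  # warmup: lr_start -> lr_max
        return lr_start + (lr_max - lr_start) * step / max(n_warmup, 1)
    if step < n_warmup + n_decay:  # decay: lr_max -> lr_final_decay
        t = (step - n_warmup) / max(n_decay, 1)
        return lr_max + (lr_final_decay - lr_max) * t
    if step < n_warmup + n_decay + n_cooldown:  # cooldown: lr_final_decay -> lr_final
        t = (step - n_warmup - n_decay) / max(n_cooldown, 1)
        return lr_final_decay + (lr_final - lr_final_decay) * t
    return lr_final  # afterwards the final learning rate is kept
####################################################################################################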
-import numpy as np -import torch import logging -# import matplotlib.pyplot as plt -import warnings -import code - -from torch.optim.lr_scheduler import LinearLR -from torch.optim.lr_scheduler import OneCycleLR -from torch.optim.lr_scheduler import ExponentialLR - - -class LearningRateScheduler : - - def __init__( self, optimizer, batch_size, num_ranks, - lr_start, lr_max, lr_final_decay, lr_final, - n_steps_warmup, n_steps_decay, n_steps_cooldown, - policy_warmup, policy_decay, policy_cooldown, - step_contd = -1, scaling_policy='sqrt') : - # ''' - # Three-phase learning rate schedule - - # optimizer : - # ''' - - # TODO: implement cool down mode that continues a run but performs just cooldown - # from current learning rate, see https://arxiv.org/abs/2106.04560 - - assert lr_final_decay >= lr_final - - self.optimizer = optimizer - self.batch_size = batch_size - self.num_ranks = num_ranks - - self.n_steps_warmup = n_steps_warmup - self.n_steps_decay = n_steps_decay - self.n_steps_cooldown = n_steps_cooldown - - if scaling_policy=='const' : - kappa = 1 - elif scaling_policy=='sqrt' : - kappa = np.sqrt(batch_size * self.num_ranks) - elif scaling_policy=='linear' : - kappa = batch_size * self.num_ranks - else : - assert False, 'unsupported learning rate policy' - - self.lr_max_scaled = kappa * lr_max - lr_final_decay_scaled = kappa * lr_final_decay - - self.policy_warmup = policy_warmup - self.policy_decay = policy_decay - self.policy_cooldown = policy_cooldown - - self.step_contd = step_contd - - # create learning rate schedulers - - ########################## - # warmup - if policy_warmup == 'linear' : - self.scheduler_warmup = LinearLR( optimizer, start_factor=lr_start/self.lr_max_scaled, - end_factor=1.0, total_iters=n_steps_warmup) - - elif policy_warmup == 'cosine' : - n_steps = n_steps_warmup + n_steps_decay + 1 - pct_start = n_steps_warmup / n_steps - self.scheduler_warmup = OneCycleLR( optimizer, max_lr=self.lr_max_scaled, - total_steps=n_steps, pct_start=pct_start, - div_factor=self.lr_max_scaled/lr_start, - final_div_factor=lr_final_decay_scaled/lr_start) - else : - if n_steps_warmup > 0 : - assert False, 'Unsupported warmup policy for learning rate scheduler' - - ########################## - # decay - if policy_decay == 'linear' : - self.scheduler_decay = LinearLR( optimizer, start_factor=1.0, - end_factor=lr_final_decay/self.lr_max_scaled, - total_iters=n_steps_decay) - - elif policy_decay == 'exponential' : - gamma = np.power(np.float64(lr_final_decay/self.lr_max_scaled), 1.0/np.float64(n_steps_decay)) - self.scheduler_decay = ExponentialLR( optimizer, gamma=gamma) - - elif policy_decay == 'cosine' : - # OneCycleLR has global state so more work needed to have independent ones - assert policy_decay == policy_warmup - self.scheduler_decay = self.scheduler_warmup - - elif policy_decay == 'sqrt' : - self.decay_factor = self.lr_max_scaled * np.sqrt( n_steps_warmup) - self.scheduler_decay = None - - else : - assert False, 'Unsupported decay policy for learning rate scheduler' - - ########################## - # cool down - if policy_cooldown == 'linear' : - self.scheduler_cooldown = LinearLR( optimizer, start_factor=lr_start/self.lr_max_scaled, - end_factor=lr_final/lr_final_decay if lr_final_decay>0. 
else 0., - total_iters=n_steps_cooldown) - # TODO: this overwrites the cosine scheduler for warmup (seems there are some global vars ) - # elif policy_cooldown == 'cosine' : - # self.scheduler_cooldown = torch.optim.lr_scheduler.OneCycleLR( optimizer, - # max_lr=lr_final_decay, - # total_steps=n_steps_cooldown, - # pct_start=0.0) - else : - if n_steps_cooldown > 0 : - assert 'Unsupported cooldown policy for learning rate scheduler' - - # set initial scheduler - self.cur_scheduler = self.scheduler_warmup if n_steps_warmup > 0 else self.scheduler_decay - - # explicitly track steps to be able to switch between optimizers - self.i_step = 0 - self.lr = self.cur_scheduler.get_last_lr() - - # advance manually to step_contd (last_epoch parameter for schedulers is not working and - # this is also more brittle with the different phases) - # optimizer.step() as required by torch; won't have a material effect since grads are zero at - # this point - if self.step_contd > 0 : - optimizer.step() - for _ in range( step_contd) : - self.step() - - ####################################### - def step( self) : - ''' - Perform one step of learning rate schedule - ''' - - # keep final learning rate - if self.i_step >= (self.n_steps_warmup + self.n_steps_decay + self.n_steps_cooldown) : - return self.lr - - if (self.policy_decay == 'sqrt' and self.i_step > self.n_steps_warmup - and self.i_step < self.n_steps_warmup + self.n_steps_decay) : - self.lr = (self.decay_factor/np.sqrt(self.i_step)) if self.i_step > 0 else self.lr_max_scaled - for g in self.optimizer.param_groups: - g['lr'] = self.lr - else : - self.cur_scheduler.step() - self.lr = self.cur_scheduler.get_last_lr()[0] - - # switch scheduler when learning rate regime completed - if self.i_step == self.n_steps_warmup : - self.cur_scheduler = self.scheduler_decay - str = f'Switching scheduler to {self.cur_scheduler} at scheduler step = {self.i_step}.' - logging.getLogger('obslearn').info( str) - - # switch scheduler when learning rate completed - if self.i_step == self.n_steps_warmup + self.n_steps_decay : - self.cur_scheduler = self.scheduler_cooldown - str = f'Switching scheduler to {self.cur_scheduler} at scheduler step = {self.i_step}.' 
- logging.getLogger('obslearn').info( str) - - self.i_step += 1 - - return self.lr - - ####################################### - def get_lr( self) : - return self.lr - - ####################################### - @staticmethod - def plot() : - ''' - Generate plot of learning rate schedule - - Use as LearningRateScheduler.plot() - ''' - - num_epochs = 42 - num_samples_per_epoch = 4096 - - lr_start = 0.000001 - lr_max = 0.000015 - lr_final_decay = 0.000001 - lr_final = 0.0 - lr_steps_warmup = 256 - lr_steps_cooldown = 1024 - lr_steps_warmup = 256 - lr_steps_cooldown = 4096 - lr_policy_warmup = 'cosine' - lr_policy_decay = 'linear' - lr_policy_cooldown = 'linear' - - model = torch.nn.Linear(2, 1) - optimizer = torch.optim.SGD(model.parameters(), lr=lr_max) - - scheduler = LearningRateScheduler( optimizer, 1, 1, lr_start, lr_max, lr_final_decay, lr_final, - lr_steps_warmup, num_epochs*num_samples_per_epoch, lr_steps_cooldown, - lr_policy_warmup, lr_policy_decay, lr_policy_cooldown) - lrs = [] - - for i in range( num_epochs*num_samples_per_epoch + lr_steps_warmup + lr_steps_cooldown + 1023): - optimizer.step() - lrs.append( optimizer.param_groups[0]['lr']) - scheduler.step() - - plt.plot(lrs, 'b') - # plt.savefig( './plots/lr_schedule.png') - - # second strategy for comparison - - lr_policy_decay = 'cosine' - - model = torch.nn.Linear(2, 1) - optimizer = torch.optim.SGD(model.parameters(), lr=lr_max) - - scheduler = LearningRateScheduler( optimizer, 1, 1, lr_start, lr_max, lr_final_decay, lr_final, - lr_steps_warmup, num_epochs*num_samples_per_epoch, lr_steps_cooldown, - lr_policy_warmup, lr_policy_decay, lr_policy_cooldown) - lrs = [] - - for i in range( num_epochs*num_samples_per_epoch + lr_steps_warmup + lr_steps_cooldown + 1023): - optimizer.step() - lrs.append( optimizer.param_groups[0]['lr']) - scheduler.step() - - plt.plot(lrs, 'r') - plt.savefig( './plots/lr_schedule.png') - +import matplotlib.pyplot as plt +import numpy as np +import torch +from torch.optim.lr_scheduler import ExponentialLR, LinearLR, OneCycleLR + + +class LearningRateScheduler: + def __init__( + self, + optimizer, + batch_size, + world_size, + lr_start, + lr_max, + lr_final_decay, + lr_final, + n_steps_warmup, + n_steps_decay, + n_steps_cooldown, + policy_warmup, + policy_decay, + policy_cooldown, + step_contd=-1, + scaling_policy="sqrt", + ): + # ''' + # Three-phase learning rate schedule + + # optimizer : + # ''' + + # TODO: implement cool down mode that continues a run but performs just cooldown + # from current learning rate, see https://arxiv.org/abs/2106.04560 + + assert lr_final_decay >= lr_final + + self.optimizer = optimizer + self.batch_size = batch_size + self.world_size = world_size + + self.n_steps_warmup = n_steps_warmup + self.n_steps_decay = n_steps_decay + self.n_steps_cooldown = n_steps_cooldown + + if scaling_policy == "const": + kappa = 1 + elif scaling_policy == "sqrt": + kappa = np.sqrt(batch_size * self.world_size) + elif scaling_policy == "linear": + kappa = batch_size * self.world_size + else: + assert False, "unsupported learning rate policy" + + self.lr_max_scaled = kappa * lr_max + lr_final_decay_scaled = kappa * lr_final_decay + + self.policy_warmup = policy_warmup + self.policy_decay = policy_decay + self.policy_cooldown = policy_cooldown + + self.step_contd = step_contd + + # create learning rate schedulers + + ########################## + # warmup + if policy_warmup == "linear": + self.scheduler_warmup = LinearLR( + optimizer, + start_factor=lr_start / self.lr_max_scaled, + 
end_factor=1.0, + total_iters=n_steps_warmup, + ) + + elif policy_warmup == "cosine": + n_steps = n_steps_warmup + n_steps_decay + 1 + pct_start = n_steps_warmup / n_steps + self.scheduler_warmup = OneCycleLR( + optimizer, + max_lr=self.lr_max_scaled, + total_steps=n_steps, + pct_start=pct_start, + div_factor=self.lr_max_scaled / lr_start, + final_div_factor=lr_final_decay_scaled / lr_start, + ) + else: + if n_steps_warmup > 0: + assert False, "Unsupported warmup policy for learning rate scheduler" + + ########################## + # decay + if policy_decay == "linear": + self.scheduler_decay = LinearLR( + optimizer, + start_factor=1.0, + end_factor=lr_final_decay / self.lr_max_scaled, + total_iters=n_steps_decay, + ) + + elif policy_decay == "exponential": + gamma = np.power( + np.float64(lr_final_decay / self.lr_max_scaled), 1.0 / np.float64(n_steps_decay) + ) + self.scheduler_decay = ExponentialLR(optimizer, gamma=gamma) + + elif policy_decay == "cosine": + # OneCycleLR has global state so more work needed to have independent ones + assert policy_decay == policy_warmup + self.scheduler_decay = self.scheduler_warmup + + elif policy_decay == "sqrt": + self.decay_factor = self.lr_max_scaled * np.sqrt(n_steps_warmup) + self.scheduler_decay = None + + elif policy_decay == "constant": + self.decay_factor = 0.0 + self.scheduler_decay = None + + else: + assert False, "Unsupported decay policy for learning rate scheduler" + + ########################## + # cool down + if policy_cooldown == "linear": + self.scheduler_cooldown = LinearLR( + optimizer, + start_factor=lr_start / self.lr_max_scaled, + end_factor=lr_final / lr_final_decay if lr_final_decay > 0.0 else 0.0, + total_iters=n_steps_cooldown, + ) + # TODO: this overwrites the cosine scheduler for warmup (seems there are some global vars ) + # elif policy_cooldown == 'cosine' : + # self.scheduler_cooldown = torch.optim.lr_scheduler.OneCycleLR( + # optimizer, + # max_lr=lr_final_decay, + # total_steps=n_steps_cooldown, + # pct_start=0.0, + # ) + else: + if n_steps_cooldown > 0: + assert "Unsupported cooldown policy for learning rate scheduler" + + # set initial scheduler + self.cur_scheduler = self.scheduler_warmup if n_steps_warmup > 0 else self.scheduler_decay + + # explicitly track steps to be able to switch between optimizers + self.i_step = 0 + self.lr = self.cur_scheduler.get_last_lr() + + # advance manually to step_contd (last_mini_epoch parameter for schedulers is not working + # and this is also more brittle with the different phases) + # optimizer.step() as required by torch; + # won't have a material effect since grads are zero at this point + if self.step_contd > 0: + optimizer.step() + for _ in range(step_contd): + self.step() + + ####################################### + def step(self): + """ + Perform one step of learning rate schedule + """ + + # keep final learning rate + if self.i_step >= (self.n_steps_warmup + self.n_steps_decay + self.n_steps_cooldown): + return self.lr + + end_decay = self.n_steps_warmup + self.n_steps_decay + phase_decay = (self.i_step > self.n_steps_warmup) and (self.i_step <= end_decay) + + if self.policy_decay == "sqrt" and phase_decay: + self.lr = ( + (self.decay_factor / np.sqrt(self.i_step)) + if self.i_step > 0 + else self.lr_max_scaled + ) + for g in self.optimizer.param_groups: + g["lr"] = self.lr + elif self.policy_decay == "constant" and phase_decay: + cur_lr = self.lr + self.lr = self.lr_max_scaled + # make sure lr_max_scaled rate is used if warm-up end is not lr_max_scaled + if cur_lr < 
self.lr: + for g in self.optimizer.param_groups: + g["lr"] = self.lr + else: + self.cur_scheduler.step() + self.lr = self.cur_scheduler.get_last_lr()[0] + + # switch scheduler when learning rate regime completed + if self.i_step == self.n_steps_warmup: + self.cur_scheduler = self.scheduler_decay + str = f"Switching scheduler to {self.cur_scheduler} at scheduler step = {self.i_step}." + logging.getLogger("obslearn").info(str) + + # switch scheduler when learning rate completed + if self.i_step == self.n_steps_warmup + self.n_steps_decay: + self.cur_scheduler = self.scheduler_cooldown + str = f"Switching scheduler to {self.cur_scheduler} at scheduler step = {self.i_step}." + logging.getLogger("obslearn").info(str) + + self.i_step += 1 + + return self.lr + + ####################################### + def get_lr(self): + return self.lr + + ####################################### + @staticmethod + def plot(): + """ + Generate plot of learning rate schedule + + Use as LearningRateScheduler.plot() + """ + + num_mini_epochs = 42 + num_samples_per_mini_epoch = 4096 + + lr_start = 0.000001 + lr_max = 0.000015 + lr_final_decay = 0.000001 + lr_final = 0.0 + lr_steps_warmup = 256 + lr_steps_cooldown = 1024 + lr_steps_warmup = 256 + lr_steps_cooldown = 4096 + lr_policy_warmup = "cosine" + lr_policy_decay = "linear" + lr_policy_cooldown = "linear" + + model = torch.nn.Linear(2, 1) + optimizer = torch.optim.SGD(model.parameters(), lr=lr_max) + + scheduler = LearningRateScheduler( + optimizer, + 1, + 1, + lr_start, + lr_max, + lr_final_decay, + lr_final, + lr_steps_warmup, + num_mini_epochs * num_samples_per_mini_epoch, + lr_steps_cooldown, + lr_policy_warmup, + lr_policy_decay, + lr_policy_cooldown, + ) + lrs = [] + + for _ in range( + num_mini_epochs * num_samples_per_mini_epoch + + lr_steps_warmup + + lr_steps_cooldown + + 1023 + ): + optimizer.step() + lrs.append(optimizer.param_groups[0]["lr"]) + scheduler.step() + + plt.plot(lrs, "b") + # plt.savefig( './plots/lr_schedule.png') + + # second strategy for comparison + + lr_policy_decay = "cosine" + + model = torch.nn.Linear(2, 1) + optimizer = torch.optim.SGD(model.parameters(), lr=lr_max) + + scheduler = LearningRateScheduler( + optimizer, + 1, + 1, + lr_start, + lr_max, + lr_final_decay, + lr_final, + lr_steps_warmup, + num_mini_epochs * num_samples_per_mini_epoch, + lr_steps_cooldown, + lr_policy_warmup, + lr_policy_decay, + lr_policy_cooldown, + ) + lrs = [] + + for _ in range( + num_mini_epochs * num_samples_per_mini_epoch + + lr_steps_warmup + + lr_steps_cooldown + + 1023 + ): + optimizer.step() + lrs.append(optimizer.param_groups[0]["lr"]) + scheduler.step() + + plt.plot(lrs, "r") + plt.savefig("./plots/lr_schedule.png") diff --git a/src/weathergen/train/trainer.py b/src/weathergen/train/trainer.py index 1ec1dfbdb..1d40cabb7 100644 --- a/src/weathergen/train/trainer.py +++ b/src/weathergen/train/trainer.py @@ -1,4 +1,6 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# ruff: noqa: T201 + +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -6,784 +8,1037 @@ # In applying this licence, ECMWF does not waive the privileges and immunities # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. 
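####################################################################################################
# The import changes below move the trainer from the FSDP1 wrapper (FullyShardedDataParallel,
# ShardingStrategy, MixedPrecision) to FSDP2's fully_shard / MixedPrecisionPolicy. A minimal
# sketch of the new API, assuming PyTorch >= 2.4 and an already initialized process group
# (torch.distributed.init_process_group); the module structure and dtypes are illustrative:

import torch
import torch.nn as nn
from torch.distributed.fsdp import MixedPrecisionPolicy, fully_shard


def shard_model(model: nn.Module) -> nn.Module:
    # compute in bf16, reduce gradients in fp32
    mp = MixedPrecisionPolicy(param_dtype=torch.bfloat16, reduce_dtype=torch.float32)
    # shard the submodules first, then the root module
    for block in model.children():
        fully_shard(block, mp_policy=mp)
    fully_shard(model, mp_policy=mp)
    return model  # parameters are now DTensors, sharded across the default device mesh
####################################################################################################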
- -import os -import code +import itertools +import logging +import re import time -import string -import random -import functools +from pathlib import Path +from typing import Any -import math import numpy as np +import omegaconf import torch -import logging -from functools import partial - +import torch.nn as nn import tqdm - -import zarr - -import torch.distributed as dist -import torch.utils.data.distributed - -from torch.distributed.fsdp import FullyShardedDataParallel as FSDP -from torch.distributed.fsdp.fully_sharded_data_parallel import ( - CPUOffload, - ShardingStrategy, - MixedPrecision, - BackwardPrefetch, -) -from torch.distributed.fsdp.wrap import ( - size_based_auto_wrap_policy, - # default_auto_wrap_policy, - enable_wrap, - wrap, +from numpy.typing import NDArray +from omegaconf import OmegaConf +from torch import Tensor + +# FSDP2 +from torch.distributed.fsdp import ( + MixedPrecisionPolicy, + fully_shard, ) -from torch.distributed.fsdp import FullyShardedDataParallel as FSDP -from torch.distributed.fsdp import StateDictType, FullStateDictConfig, FullOptimStateDictConfig - -from weathergen.train.trainer_base import Trainer_Base -from weathergen.train.lr_scheduler import LearningRateScheduler +from torch.distributed.tensor import DTensor, distribute_tensor +import weathergen.common.config as config +from weathergen.common.config import Config from weathergen.datasets.multi_stream_data_sampler import MultiStreamDataSampler +from weathergen.datasets.stream_data import StreamData +from weathergen.model.attention import ( + MultiCrossAttentionHeadVarlen, + MultiCrossAttentionHeadVarlenSlicedQ, + MultiSelfAttentionHead, + MultiSelfAttentionHeadLocal, + MultiSelfAttentionHeadVarlen, +) +from weathergen.model.ema import EMAModel +from weathergen.model.layers import MLP from weathergen.model.model import Model, ModelParams -from weathergen.utils.config import Config -from weathergen.utils.logger import logger -from weathergen.utils.train_logger import TrainLogger -from weathergen.utils.validation_io import write_validation -from weathergen.train.utils import get_run_id - -import weathergen.train.loss as losses - - -class Trainer( Trainer_Base) : - - ########################################### - def __init__( self, log_freq = 20, checkpoint_freq = 250, print_freq = 10) : - Trainer_Base.__init__( self) - - assert print_freq < log_freq - self.log_freq = log_freq - self.checkpoint_freq = checkpoint_freq - self.print_freq = print_freq - - ########################################### - def init( self, cf, run_id_contd = None, epoch_contd = None, run_id_new=False, - run_mode='training') : - - self.cf = cf - - if isinstance( run_id_new, str) : - cf.run_id = run_id_new - elif run_id_new or cf.run_id is None: - cf.run_id = get_run_id() - elif run_id_contd is not None and run_id_new==False : - cf.run_id = run_id_contd - assert cf.run_id is not None - - assert cf.samples_per_epoch % cf.batch_size == 0 - assert cf.samples_per_validation % cf.batch_size_validation == 0 - - self.devices = self.init_torch() - - self.init_ddp( cf) - - # read configuration of data streams - cf = self.init_streams( cf, run_id_contd) - - # self.init_mlflow( cf, self.cf.rank, run_id_contd, run_id_new) - - # create output directory - path_run = './results/' + cf.run_id + '/' - path_model = './models/' + cf.run_id + '/' - if 0 == self.cf.rank : - os.makedirs( path_run, exist_ok=True) - os.makedirs( path_model, exist_ok=True) - # save config - cf.save() - if run_mode == 'training' : - cf.print() - self.path_run = path_run - 
- self.init_perf_monitoring() - - self.train_logger = TrainLogger( cf, self.path_run) - - # TODO: adapt to this info read from yaml files - # if self.cf.loss_chs is not None : - # self.loss_chs = [[l_ch[0] for l_ch in lcs] for lcs in self.cf.loss_chs] - # self.loss_chs_weights = [[l_ch[1] for l_ch in lcs] for lcs in self.cf.loss_chs] - - ########################################### - def evaluate( self, cf, run_id_trained, epoch, run_id_new=False) : - - # general initalization - self.init( cf, run_id_trained, epoch, run_id_new, run_mode='evaluate') - - self.dataset_val = MultiStreamDataSampler( cf.data_path, cf.rank, cf.num_ranks, cf.streams, - cf.start_date_val, cf.end_date_val, cf.len_hrs, - cf.step_hrs, cf.batch_size_validation, - cf.masking_mode, cf.masking_rate, cf.masking_rate_sampling, - cf.shuffle, - forecast_delta_hrs = cf.forecast_delta_hrs, - forecast_steps = cf.forecast_steps, - forecast_policy = cf.forecast_policy, - healpix_level = cf.healpix_level, - samples_per_epoch = cf.samples_per_validation, - input_window_steps = cf.input_window_steps, - embed_local_coords = cf.embed_local_coords, - embed_centroids_local_coords = cf.embed_centroids_local_coords, - target_coords_local = cf.target_coords_local, - sampling_rate_target = cf.sampling_rate_target ) - - loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, - 'num_workers': cf.loader_num_workers, 'pin_memory': True} - self.data_loader_validation = torch.utils.data.DataLoader( self.dataset_val, - **loader_params, sampler = None) - - num_channels = self.dataset_val.get_num_chs() - self.geoinfo_sizes = self.dataset_val.get_geoinfo_sizes() - - self.model = Model( cf, num_channels, self.geoinfo_sizes).create().to( self.devices[0]) - self.model.load( run_id_trained, epoch) - print( f'Loaded model {run_id_trained} at epoch {epoch}.') - self.ddp_model = self.model - self.model_params = ModelParams().create( cf).to( self.devices[0]) - logging.getLogger('obslearn').info( f'Loaded model id={run_id_trained} at epoch={epoch}.') - - self.loss_fcts_val = [] - for name, w in cf.loss_fcts_val : - self.loss_fcts_val += [ [ getattr( losses, name), w] ] - - # evaluate validation set - self.validate( epoch=0) - print( f'Finished evaluation run with id: {cf.run_id}') - - # mlflow.end_run() - - ########################################### - def evaluate_jac( self, cf, run_id, epoch, mode='row', - date=None, obs_id=0, sample_id=0) : - '''Computes a row or column of the Jacobian as determined by mode ('row' or 'col'), i.e. 
- determines sensitivities with respect to outputs or inputs - ''' - - # general initalization - self.init( cf, run_id, epoch, run_id_new=True, run_mode='offline') - - self.dataset = MultiStreamDataSampler( cf.streams, cf.start_date_val, cf.end_date_val, - cf.delta_time, 1, - cf.masking_mode, cf.masking_rate_sampling, - cf.t_win_hour, cf.loss_chs, - shuffle=False, source_chs=cf.source_chs, - forecast_steps = cf.forecast_steps, - forecast_policy = cf.forecast_policy, - healpix_level = cf.healpix_level ) - - num_channels = self.dataset.get_num_chs() - - self.model = Model( cf, num_channels).create().to( self.devices[0]) - self.model.load( run_id, epoch) - print( f'Loaded model id={run_id}.') - - # TODO: support loading of specific data - dataset_iter = iter(self.dataset) - (sources, targets, targets_idxs, s_lens) = next(dataset_iter) - - dev = self.devices[0] - sources = [source.to(dev,non_blocking=True) for source in sources] - targets = [[toks.to(dev,non_blocking=True) for toks in target] for target in targets] - - # evaluate model - with torch.autocast(device_type='cuda',dtype=torch.float16,enabled=cf.with_mixed_precision): - - if 'row' == mode : - sources_in = [*sources, s_lens.to(torch.float32)] - y = self.model( sources, s_lens ) - # vectors used to extract row from Jacobian - vs_sources = [torch.zeros_like(y_obs) for y_obs in y[0]] - vs_sources[obs_id][sample_id] = 1. - # evaluate - out = torch.autograd.functional.vjp( self.model.forward_jac, tuple(sources_in), - tuple(vs_sources) ) - - elif 'col' == mode : - # vectors used to extract col from Jacobian - vs_sources = [torch.zeros_like(s_obs) for s_obs in sources] - vs_sources[obs_id][sample_id] = 1. - vs_s_lens = torch.zeros_like( s_lens, dtype=torch.float32) - # provide one tuple in the end - sources_in = [*sources, s_lens.to(torch.float32)] - vs_sources.append(vs_s_lens) - # evaluate - out = torch.autograd.functional.jvp( self.model.forward_jac, tuple(sources_in), - tuple(vs_sources) ) - else : - assert False, 'Unsupported mode.' 
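####################################################################################################
# The deleted evaluate_jac above relies on the standard vjp/jvp identities: for f: R^n -> R^m,
# vjp(f, x, e_i) yields row i of the Jacobian J_f(x), while jvp(f, x, e_j) yields column j.
# A self-contained illustration on a toy function (not part of the deleted code):

import torch


def f(x):
    return torch.stack([x[0] * x[1], x[0] + x[1], x[1] ** 2])


x = torch.tensor([2.0, 3.0])
_, row0 = torch.autograd.functional.vjp(f, x, torch.tensor([1.0, 0.0, 0.0]))  # row 0: [3., 2.]
_, col0 = torch.autograd.functional.jvp(f, x, torch.tensor([1.0, 0.0]))       # col 0: [3., 1., 0.]
####################################################################################################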
- - # extract and write output - # TODO: refactor and try to combine with the code in compute_loss - - preds = out[0] - jac = [j_obs.cpu().detach().numpy() for j_obs in out[1]] - - sources_all, preds_all = [[] for _ in cf.streams], [[] for _ in cf.streams] - targets_all, targets_coords_all = [[] for _ in cf.streams], [[] for _ in cf.streams] - targets_idxs_all = [[] for _ in cf.streams] - sources_lens = [toks.shape[0] for toks in sources] - targets_lens = [ [toks.shape[0] for toks in target] for target in targets] - - for i_obs, b_targets_idxs in enumerate(targets_idxs) : - for i_b, target_idxs_obs in enumerate( b_targets_idxs) : # 1 batch - - if len(targets[i_obs][i_b]) == 0 : - continue - - gs = self.cf.geoinfo_size - target_i_obs = torch.cat( [t[:,gs:].unsqueeze(0) for t in targets[i_obs][i_b]], 0) - preds_i_obs = preds[i_obs][target_idxs_obs] - preds_i_obs = preds_i_obs.reshape( [*preds_i_obs.shape[:2], *target_i_obs.shape[1:]]) - - if self.cf.loss_chs is not None : - if len( self.cf.loss_chs[i_obs]) == 0 : - continue - target_i_obs = target_i_obs[...,self.cf.loss_chs[i_obs]] - preds_i_obs = preds_i_obs[...,self.cf.loss_chs[i_obs]] - - ds_val = self.dataset - n = self.cf.geoinfo_size - - sources[i_obs][:,:,n:] = ds_val.denormalize_data( i_obs, sources[i_obs][:,:,n:]) - sources[i_obs][:,:,:n] = ds_val.denormalize_coords( i_obs, sources[i_obs][:,:,:n]) - sources_all[i_obs] += [ sources[i_obs].detach().cpu() ] - - preds_all[i_obs] += [ ds_val.denormalize_data( i_obs, preds_i_obs).detach().cpu() ] - targets_all[i_obs] += [ ds_val.denormalize_data( i_obs, target_i_obs).detach().cpu() ] - - target_i_coords = torch.cat( [t[:,:n].unsqueeze(0) for t in targets[i_obs][i_b]], 0).detach().cpu() - targets_coords_all[i_obs] += [ ds_val.denormalize_coords( i_obs, target_i_coords).detach().cpu() ] - targets_idxs_all[i_obs] += [ target_idxs_obs ] - - cols = [ds[0][0].colnames for ds in dataset_val.obs_datasets_norm] - write_validation( self.cf, self.path_run, self.cf.rank, epoch, cols, - sources_all, preds_all, targets_all, - targets_coords_all, targets_idxs_all, - sources_lens, targets_lens, jac) - - ########################################### - def run( self, cf, run_id_contd = None, epoch_contd = None, run_id_new = False) : - - # general initalization - self.init( cf, run_id_contd, epoch_contd, run_id_new) - - self.dataset = MultiStreamDataSampler( cf.data_path, cf.rank, cf.num_ranks, cf.streams, - cf.start_date, cf.end_date, cf.len_hrs, cf.step_hrs, - cf.batch_size, - cf.masking_mode, cf.masking_rate, cf.masking_rate_sampling, - shuffle=True, rng_seed=cf.data_loader_rng_seed, - forecast_delta_hrs = cf.forecast_delta_hrs, - forecast_steps = cf.forecast_steps, - forecast_policy = cf.forecast_policy, - healpix_level = cf.healpix_level, - samples_per_epoch=cf.samples_per_epoch, - input_window_steps = cf.input_window_steps, - embed_local_coords = cf.embed_local_coords, - embed_centroids_local_coords = cf.embed_centroids_local_coords, - target_coords_local = cf.target_coords_local, - sampling_rate_target = cf.sampling_rate_target) - self.dataset_val = MultiStreamDataSampler( cf.data_path, cf.rank, cf.num_ranks, cf.streams, - cf.start_date_val, cf.end_date_val, cf.len_hrs, - cf.step_hrs, cf.batch_size_validation, - cf.masking_mode, - # validation mode is always full forecasting - masking_rate=0.0, masking_rate_sampling=False, - shuffle=True, rng_seed=cf.data_loader_rng_seed, - forecast_delta_hrs = cf.forecast_delta_hrs, - forecast_steps = cf.forecast_steps, - forecast_policy = cf.forecast_policy, - 
healpix_level = cf.healpix_level, - samples_per_epoch=max( 32, cf.samples_per_validation//cf.num_ranks), - input_window_steps = cf.input_window_steps, - embed_local_coords = cf.embed_local_coords, - embed_centroids_local_coords = cf.embed_centroids_local_coords, - target_coords_local = cf.target_coords_local, - sampling_rate_target = cf.sampling_rate_target) - - loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, - 'num_workers': cf.loader_num_workers, 'pin_memory': True} - self.data_loader = torch.utils.data.DataLoader( self.dataset, **loader_params, sampler = None) - self.data_loader_validation = torch.utils.data.DataLoader( self.dataset_val, - **loader_params, sampler = None) - - num_channels = self.dataset.get_num_chs() - self.geoinfo_sizes = self.dataset.get_geoinfo_sizes() - - self.model = Model( cf, num_channels, self.geoinfo_sizes).create() - # load model if specified - if run_id_contd is not None : - self.model.load( run_id_contd, epoch_contd) - print( f'Loaded model id={run_id_contd}.') - - if cf.forecast_freeze_model : - self.model = self.model.freeze_weights_forecast() - - self.model = self.model.to( self.devices[0]) - - if cf.compile_model : - self.model = torch.compile( self.model, dynamic=True) - - self.ddp_model = self.model - if cf.with_ddp and not cf.with_fsdp : - self.ddp_model = torch.nn.parallel.DistributedDataParallel( self.model, - broadcast_buffers=True, - find_unused_parameters=True, - gradient_as_bucket_view=True, - bucket_cap_mb=512) - - if cf.with_ddp and cf.with_fsdp : - mp = None if not cf.with_mixed_precision else MixedPrecision( param_dtype=torch.float16, - cast_forward_inputs=True) - mp = None - self.ddp_model = FSDP( self.model, auto_wrap_policy=size_based_auto_wrap_policy, - sharding_strategy=ShardingStrategy.FULL_SHARD, - cpu_offload=None, - sync_module_states=(run_id_contd is not None), - mixed_precision=mp) - - self.model_params = ModelParams().create( cf).to('cuda') - - # if with_fsdp then parameter count is unreliable - if (0 == self.cf.rank and not cf.with_fsdp) or not cf.with_ddp : - self.model.print_num_parameters() - - # TODO: learning rate schedule - # https://www.cs.princeton.edu/~smalladi/blog/2024/01/22/SDEs-ScalingRules/ - kappa = (cf.batch_size * cf.num_ranks) - beta1 = max( 0.5, 1. - kappa*(1. - 0.9)) - beta2 = 1. - kappa*(1. - 0.999) - eps = 1e-08 / np.sqrt(kappa) - # beta1, beta2, eps = 0.125, 0.125, 1e-08 - self.optimizer = torch.optim.AdamW( self.ddp_model.parameters(), lr=cf.lr_start, - weight_decay=cf.weight_decay, - betas=(beta1,beta2), eps=eps) - self.grad_scaler = torch.amp.GradScaler('cuda') - - # lr is updated after each batch so account for this - cf.lr_steps = int( (len(self.dataset) * cf.num_epochs) / cf.batch_size ) - steps_decay = cf.lr_steps - cf.lr_steps_warmup - cf.lr_steps_cooldown - # ensure that steps_decay has a reasonable value - if steps_decay < int(0.2 * cf.lr_steps) : - cf.lr_steps_warmup = int( 0.1 * cf.lr_steps) - cf.lr_steps_cooldown = int( 0.05 * cf.lr_steps) - steps_decay = cf.lr_steps - cf.lr_steps_warmup - cf.lr_steps_cooldown - str =f'cf.lr_steps_warmup and cf.lr_steps_cooldown were larger than cf.lr_steps={cf.lr_steps}' - str += '. The value have been adjusted to cf.lr_steps_warmup={cf.lr_steps_warmup} and ' - str += ' cf.lr_steps_cooldown={cf.lr_steps_cooldown} so that steps_decay={steps_decay}.' 
- logging.getLogger('obslearn').warning( f'') - self.lr_scheduler = LearningRateScheduler( self.optimizer, cf.batch_size, cf.num_ranks, - cf.lr_start, cf.lr_max, cf.lr_final_decay,cf.lr_final, - cf.lr_steps_warmup, steps_decay, cf.lr_steps_cooldown, - cf.lr_policy_warmup, cf.lr_policy_decay, - cf.lr_policy_cooldown, cf.istep, - cf.lr_scaling_policy ) - - if self.cf.istep > 0 and 0 == self.cf.rank : - str = f'Continuing run with learning rate: {self.lr_scheduler.get_lr()}' - logging.getLogger('obslearn').info( str) - - # get function handles for loss function terms - self.loss_fcts = [ [ getattr( losses, name), w] for name, w in cf.loss_fcts ] - self.loss_fcts_val = [ [ getattr( losses, name), w] for name, w in cf.loss_fcts_val] - - # recover epoch when continuing run - epoch_base = int( self.cf.istep / len(self.data_loader)) - - # torch.autograd.set_detect_anomaly(True) - if cf.forecast_policy is not None : - torch._dynamo.config.optimize_ddp=False - - # training loop - - # validate once at the beginning as reference - if cf.val_initial : - self.validate( -1) - - for epoch in range( epoch_base, cf.num_epochs) : - - self.train( epoch) - - self.validate( epoch) - - self.save_model( epoch) - - # log final model - self.save_model( self.num_epochs) - - ########################################### - def compute_loss( self, loss_fcts, sources, targets, targets_coords, targets_token_lens, preds, - losses_all, stddev_all, - preds_all = None, targets_all = None, - targets_coords_all = None, targets_lens = None, - mode='training') : - - rng = np.random.default_rng() - - # merge across batch dimension (and keep streams and ) - targets_rt = [[torch.cat([t[i] for t in targets[fstep]]) for i in range(len(targets[0][0]))] - for fstep in range(len(targets))] - targets_coords_rt = [[torch.cat([t[i] for t in targets_coords[fstep]]) - for i in range(len(targets_coords[0][0]))] - for fstep in range(len(targets_coords))] - targets_token_lens = [[torch.cat([t[i] for t in targets_token_lens[fstep]]) - for i in range(len(targets_token_lens[0][0]))] - for fstep in range(len(targets_token_lens))] - - ctr = 0 - loss = torch.tensor( 0., device=self.devices[0], requires_grad=True) - - # assert len(targets_rt) == len(preds) and len(preds) == len(self.cf.streams) - for fstep in range(len(targets_rt)) : - for i_obs, (target, target_coords2, si) in enumerate( zip(targets_rt[fstep], - targets_coords_rt[fstep], - self.cf.streams)) : - - pred = preds[fstep][i_obs] - - gs = self.geoinfo_sizes[i_obs] - num_channels = target[ ... , gs : ].shape[-1] - - # set obs_loss_weight = 1. when not specified - obs_loss_weight = si['loss_weight'] if 'loss_weight' in si else 1. - channel_loss_weight = si['channel_weight'] if 'channel_weight' in si else np.ones(num_channels) - # in validation mode, always unweighted loss is computed - obs_loss_weight = 1. 
if mode=='validation' else obs_loss_weight - channel_loss_weight = np.ones(num_channels) if mode=='validation' else channel_loss_weight - - tok_spacetime = si['tokenize_spacetime'] if 'tokenize_spacetime' in si else False - - if target.shape[0] > 0 and pred.shape[0] > 0 : - - # extract content if tokens have been padded - if targets_token_lens[fstep][i_obs].shape[0] > 0 : - sl = targets_token_lens[fstep][i_obs].to(torch.int64) # TODO: why is it sometimes not torch.int - tro_type = si['target_readout']['type'] if 'type' in si['target_readout'] else 'token' - if tro_type=='token' : - pred = pred.reshape( [ *pred.shape[:2], target.shape[-2], target.shape[-1]-gs]) - pred = torch.cat( [pred[:,i,:l] for i,l in enumerate(sl)], 1) - else : - pred = pred.reshape( [pred.shape[0], -1, target.shape[-1]-gs]) - # extract data/coords and remove token dimension if it exists - target_coords = target[ ... , : gs].flatten(0, -2) - target_coords[:,1:3] = target_coords2[ ... , 1:3] # copy local time - target_data = target[ ... , gs : ].flatten( 0, -2) - pred = pred.reshape( [pred.shape[0], *target_data.shape]) - - mask_nan = ~torch.isnan( target_data) - - assert pred.shape[1] > 0 - if pred[:,mask_nan].shape[1] == 0 : - continue - ens = pred.shape[0] > 1 - - # accumulate loss from different loss functions and channels - for j, (loss_fct, w) in enumerate( loss_fcts) : - - # compute per channel loss - # val_uw is unweighted loss for logging - val, val_uw, ctr = torch.tensor( 0., device=self.devices[0], requires_grad=True), 0., 0. - for i in range(target_data.shape[-1]) : - - if tok_spacetime : - # iterate over time steps and compute loss separately for each - t_unique = torch.unique( target_coords[:,1]) - # tw = np.linspace( 1.0, 2.0, len(t_unique)) - for jj, t in enumerate( t_unique) : - # if jj < len(t_unique)//2 and rng.uniform() < 0.5 and mode!='validation': - # continue - mask_t = t == target_coords[:,1] - mask = torch.logical_and( mask_t, mask_nan[:,i]) - if mask.sum().item() > 0 : - temp = loss_fct( target_data[mask,i], pred[:,mask,i], - pred[:,mask,i].mean(0), - pred[:,mask,i].std(0) if ens else torch.zeros( 1) ) - val_uw += temp.item() - val = val + channel_loss_weight[i] * temp # * tw[jj] - ctr += 1 - - else : - # only compute loss is there are non-NaN values - if mask_nan[:,i].sum().item() > 0 : - temp = loss_fct( target_data[mask_nan[:,i],i], pred[:,mask_nan[:,i],i], - pred[:,mask_nan[:,i],i].mean(0), - pred[:,mask_nan[:,i],i].std(0) if ens else torch.zeros( 1) ) - val_uw += temp.item() - val = val + channel_loss_weight[i] * temp - ctr += 1 - val = val / ctr if (ctr > 0) else val - val_uw = val_uw / ctr if (ctr > 0) else val_uw - - losses_all[j,i_obs] = val_uw - if self.cf.loss_fcts[j][0] == 'stats' or self.cf.loss_fcts[j][0] == 'kcrps' : - stddev_all[i_obs] = pred[:,mask_nan].std(0).mean().item() - # ignore NaNs so that training can continue even if one pred-net diverges - loss = loss + ((w * val * obs_loss_weight) if not torch.isnan(val) - else torch.tensor(0., requires_grad=True)) - ctr += 1 - - # log data for analysis - if preds_all is not None: - - targets_lens[i_obs] += [ target_data.shape[0] ] - dn_data, dn_coords = self.dataset_val.denormalize_data,self.dataset_val.denormalize_coords - - fp32 = torch.float32 - preds_all[i_obs] += [ dn_data( i_obs, pred.to(fp32), False).detach().cpu() ] - targets_all[i_obs] += [ dn_data( i_obs, target_data.to(fp32), False).detach().cpu()] - targets_coords_all[i_obs] += [ dn_coords( i_obs, target_coords.to(fp32)).detach().cpu() ] - - return loss / ctr - - 
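A self-contained sketch of the per-channel, NaN-masked accumulation that compute_loss implements above; the shapes and the plain-MSE stand-in are assumptions for illustration only:

    import torch

    def masked_channel_mse(target, pred, channel_weights):
        # target: [N, C]; pred: [E, N, C] with ensemble dimension E;
        # channel_weights: [C]. Each channel contributes only where the
        # target is not NaN, mirroring the loop above.
        total, ctr = torch.tensor(0.0), 0
        for i in range(target.shape[-1]):
            valid = ~torch.isnan(target[:, i])
            if valid.sum().item() == 0:
                continue  # channel entirely missing in this window
            err = (pred[:, valid, i].mean(0) - target[valid, i]) ** 2
            total = total + channel_weights[i] * err.mean()
            ctr += 1
        return total / ctr if ctr > 0 else total

    # toy usage
    t = torch.randn(8, 3)
    t[2, 0] = float("nan")
    p = torch.randn(2, 8, 3)
    print(masked_channel_mse(t, p, torch.ones(3)))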
########################################### - def train( self, epoch) : - - cf = self.cf - self.ddp_model.train() - - dataset_iter = iter( self.data_loader) - - self.optimizer.zero_grad() - self.losses_hist, self.stddev_hist = [], [] - - # training loop - self.t_start = time.time() - for bidx, data in enumerate(dataset_iter) : - - data = self.input_to_device( data) - (_, source_tokens_cells, source_tokens_lens, source_centroids, source_cell_lens, source_idxs_embed, - target_tokens, target_token_lens, targets_coords, targets_coords_lens, targets_coords_idxs, forecast_dt) = data - - losses_all = torch.ones( (len(self.loss_fcts_val), len(cf.streams)) ) * torch.nan - stddev_all = torch.zeros( len(cf.streams)) * torch.nan - - # evaluate model - with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=cf.with_mixed_precision): - - preds = self.ddp_model( self.model_params, source_tokens_cells, source_tokens_lens, source_centroids, - source_cell_lens, source_idxs_embed, - targets_coords, targets_coords_lens, targets_coords_idxs, forecast_dt) - - loss = self.compute_loss( self.loss_fcts, source_tokens_cells, target_tokens, targets_coords, - target_token_lens, preds, losses_all, stddev_all) - - # backward pass - self.grad_scaler.scale(loss).backward() - - # gradient clipping - self.grad_scaler.unscale_( self.optimizer) - torch.nn.utils.clip_grad_norm_( self.ddp_model.parameters(), max_norm=cf.grad_clip) - - # optimizer step - self.grad_scaler.step(self.optimizer) - self.grad_scaler.update() - self.optimizer.zero_grad() - - # update learning rate - self.lr_scheduler.step() - - self.losses_hist += [ losses_all ] - self.stddev_hist += [ stddev_all ] - - perf_gpu, perf_mem = self.get_perf() - self.perf_gpu = self.ddp_average( torch.tensor( [perf_gpu])).item() - self.perf_mem = self.ddp_average( torch.tensor( [perf_mem])).item() - - self.log_terminal( bidx, epoch) - self.log( bidx, epoch) - # model checkpoint - if (bidx % self.checkpoint_freq == 0) : - self.save_model() - - self.cf.istep += cf.batch_size - - self.dataset.advance() - - ########################################### - def validate( self, epoch) : - - cf = self.cf - self.ddp_model.eval() - - dataset_val_iter = iter(self.data_loader_validation) - self.losses_hist, self.stddev_hist = [], [] - - with torch.no_grad(): - # print progress bar but only in interactive mode, i.e. 
when without ddp - with tqdm.tqdm(total=len(self.data_loader_validation), disable=self.cf.with_ddp) as pbar: - for bidx, data in enumerate(dataset_val_iter) : - - data = self.input_to_device( data) - (sources, source_tokens_cells, source_tokens_lens, source_centroids, source_cell_lens, source_idxs_embed, - target_tokens, target_token_lens, targets_coords, targets_coords_lens, targets_coords_idxs, forecast_dt) = data - - losses_all = torch.ones( (len(self.loss_fcts_val), len(cf.streams)) ) * torch.nan - stddev_all = torch.zeros( len(cf.streams)) * torch.nan - - # evaluate model - with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=cf.with_mixed_precision): - preds = self.ddp_model( self.model_params, source_tokens_cells, source_tokens_lens, source_centroids, - source_cell_lens, source_idxs_embed, - targets_coords, targets_coords_lens, targets_coords_idxs, forecast_dt) - - # compute loss and log output - if bidx < cf.log_validation : - - preds_all = [[] for _ in cf.streams] - targets_all, targets_coords_all = [[] for _ in cf.streams], [[] for _ in cf.streams] - targets_lens = [[] for _ in cf.streams] - - self.compute_loss( self.loss_fcts_val, source_tokens_cells, - target_tokens, targets_coords, target_token_lens, preds, - losses_all, stddev_all, preds_all, targets_all, - targets_coords_all, targets_lens, - mode='validation') - - cols = [ds[0][0].colnames for ds in self.dataset_val.obs_datasets_norm] - write_validation( self.cf, self.path_run, self.cf.rank, epoch, cols, - sources, preds_all, targets_all, targets_coords_all, targets_lens) - - else : - - self.compute_loss( self.loss_fcts_val, source_tokens_cells, target_tokens, - targets_coords, target_token_lens, preds, losses_all, stddev_all, - mode='validation') - - self.losses_hist += [ losses_all ] - self.stddev_hist += [ stddev_all ] - - pbar.update( self.cf.batch_size_validation) - - - losses_all = self.ddp_average( torch.stack( self.losses_hist).to(torch.float64).nanmean(0)) - stddev_all = self.ddp_average( torch.stack( self.stddev_hist).to(torch.float64).nanmean(0)) - - if 0 == self.cf.rank and self.cf.istep >= 0: - loss_dict = {} - for j, (lname,_) in enumerate(cf.loss_fcts_val) : - loss_dict[f'validation {lname}'] = torch.nanmean(losses_all[j]).item() - loss_dict['validation std_dev'] = torch.nanmean(stddev_all.mean()).item() - for i_obs, rt in enumerate( cf.streams) : - loss_dict['validation {}'.format(rt['name'].replace(',',''))] = float(losses_all[0,i_obs]) - - # mlflow.log_metrics( loss_dict, step=(cf.istep * cf.batch_size * cf.num_ranks)) - # add data to plain logger - samples = cf.istep * cf.batch_size * cf.num_ranks - self.train_logger.add_val( samples, losses_all, stddev_all) - - if 0 == self.cf.rank : - print( 'validation ({}) : {:03d} : loss = {:.4E}'.format( cf.run_id, epoch, - torch.nanmean(losses_all[0])), flush=True) - for i_obs, rt in enumerate( cf.streams) : - print('{}'.format(rt['name'])+f' : {losses_all[0,i_obs]:0.4E}') - - - # avoid that there is a systematic bias in the validation subset - self.dataset_val.advance() - - ########################################### - def input_to_device( self, data) : - - (source, source_tokens_cells, source_tokens_lens, source_centroids, source_cell_lens, source_idxs_embed, - target_tokens, target_token_lens, targets_coords, targets_coords_lens, targets_coords_idxs, forecast_dt) = data - - dev = self.devices[0] - - # source data - source_tokens_cells =[[s.to(dev,non_blocking=True) for s in ss] for ss in source_tokens_cells] - source_centroids = [[c.to( 
dev,non_blocking=True) for c in cb] for cb in source_centroids] - source_cell_lens = source_cell_lens.to( dev, non_blocking=True) - source_tokens_lens = source_tokens_lens.to( dev, non_blocking=True) - source_idxs_embed[0] = [[s.to(dev,non_blocking=True) for s in ss] for ss in source_idxs_embed[0]] - - # target data - targets_coords = [[[t.to(dev,non_blocking=True) for t in tt] for tt in ttt] for ttt in targets_coords] - target_tokens = [[[t.to(dev,non_blocking=True) for t in tt] for tt in ttt] for ttt in target_tokens] - targets_coords_idxs[0] = [[s.to(dev,non_blocking=True) for s in ss] for ss in targets_coords_idxs[0]] - targets_coords_idxs[1] = [[s.to(dev,non_blocking=True) for s in ss] for ss in targets_coords_idxs[1]] - - return (source, source_tokens_cells, source_tokens_lens, source_centroids, source_cell_lens, source_idxs_embed, - target_tokens, target_token_lens, targets_coords, targets_coords_lens, targets_coords_idxs, forecast_dt) - - ########################################### - def save_model( self, epoch=-1, name=None) : - - file_out = './models/' + self.cf.run_id + '/{}_'.format( self.cf.run_id) - file_out += 'latest' if epoch==-1 else 'epoch{:05d}'.format( epoch) - file_out += ('_' + name) if name is not None else '' - file_out += '{}.chkpt' - - if self.cf.with_ddp and self.cf.with_fsdp : - cfg = FullStateDictConfig( offload_to_cpu=True, rank0_only=True) - with FSDP.state_dict_type( self.ddp_model, StateDictType.FULL_STATE_DICT, - FullStateDictConfig(rank0_only=True, offload_to_cpu=True)): - state = self.ddp_model.state_dict() - else : - state = self.ddp_model.state_dict() - - if 0 == self.cf.rank : - # save temp file (slow) - torch.save( state, file_out.format('_temp')) - # move file (which is changing the link in the file system and very fast) - os.replace( file_out.format('_temp'), file_out.format('')) - # save config - self.cf.save( epoch) - - ########################################### - def log( self, bidx, epoch) : - - if bidx % self.log_freq == 0 and bidx > 0 : - - l_avg = self.ddp_average( torch.nanmean( torch.stack( self.losses_hist), axis=0)) - stddev_avg = self.ddp_average( torch.nanmean( torch.stack( self.stddev_hist), axis=0)) - samples = self.cf.istep * self.cf.batch_size * self.cf.num_ranks - - if 0 == self.cf.rank : - - # mlflow logging - loss_dict = { 'training mse': float(torch.nanmean(l_avg[0])), - 'lr' : self.lr_scheduler.get_lr() } - for i_obs, rt in enumerate( self.cf.streams) : - loss_dict['training {}'.format(rt['name'].replace(',',''))] = float(l_avg[0,i_obs]) - # mlflow.log_metrics( loss_dict, step=samples) - - # plain logger - self.train_logger.add_train( samples, self.lr_scheduler.get_lr(), l_avg, stddev_avg, - self.perf_gpu, self.perf_mem) - - self.losses_hist, self.stddev_hist = [], [] - - ########################################### - def log_terminal( self, bidx, epoch) : - - if bidx % self.print_freq == 0 and bidx > 0 : - - # compute from last iteration - nanmean = torch.nanmean - l_avg = self.ddp_average( nanmean( torch.stack(self.losses_hist[-self.print_freq:]),axis=0)) - - if 0 == self.cf.rank : - - # samples per sec - dt = time.time() - self.t_start - pstr = '{:03d} : {:05d}/{:05d} : {:06d} : loss = {:.4E} ' - pstr += '(lr={:.2E}, s/sec={:.3f})' - len_dataset = len(self.data_loader) // self.cf.batch_size - print( pstr.format( epoch, bidx, len_dataset, self.cf.istep, - np.nanmean(l_avg[0]), self.lr_scheduler.get_lr(), - (self.print_freq*self.cf.batch_size) / dt ), flush=True) - print( '\t', end='') - for i_obs, rt in enumerate( 
self.cf.streams) :
-        print('{}'.format(rt['name'])+f' : {l_avg[0,i_obs]:0.4E} \t', end='')
-      print( '\n', flush=True)
-
-      self.t_start = time.time()
+from weathergen.model.utils import freeze_weights
+from weathergen.train.loss_calculator import LossCalculator
+from weathergen.train.lr_scheduler import LearningRateScheduler
+from weathergen.train.trainer_base import TrainerBase
+from weathergen.utils.distributed import all_gather_vlen, ddp_average, is_root
+from weathergen.utils.train_logger import TRAIN, VAL, Stage, TrainLogger
+from weathergen.utils.utils import get_dtype
+from weathergen.utils.validation_io import write_output
+
+logger = logging.getLogger(__name__)
+
+
+class Trainer(TrainerBase):
+    def __init__(self, train_log_freq: Config):
+        TrainerBase.__init__(self)
+
+        self.train_log_freq = train_log_freq
+
+    def init(self, cf: Config, devices):
+        self.cf = OmegaConf.merge(
+            OmegaConf.create(
+                {
+                    "latent_noise_kl_weight": 0.0,
+                    "latent_noise_gamma": 2.0,
+                    "latent_noise_use_additive_noise": False,
+                    "latent_noise_deterministic_latents": True,
+                    "latent_noise_saturate_encodings": 5,
+                }
+            ),
+            cf,
+        )
+        cf = self.cf
+
+        self.freeze_modules = cf.get("freeze_modules", "")
+
+        assert cf.samples_per_mini_epoch % cf.batch_size_per_gpu == 0
+        assert cf.samples_per_validation % cf.batch_size_validation_per_gpu == 0
+        config.validate_forecast_policy_and_steps(cf=cf)
+
+        self.mixed_precision_dtype = get_dtype(cf.attention_dtype)
+
+        self.devices = devices
+
+        # Get the world_size of the previous, to-be-continued run before
+        # world_size gets overwritten by the current setting during init_ddp()
+        self.world_size_original = cf.get("world_size", None)
+
+        self.log_grad_norms = cf.get("log_grad_norms", False)
+
+        # create output directory
+        if is_root():
+            config.get_path_run(cf).mkdir(exist_ok=True, parents=True)
+            config.get_path_model(cf).mkdir(exist_ok=True, parents=True)
+
+        self.init_perf_monitoring()
+        self.train_logger = TrainLogger(cf, config.get_path_run(self.cf))
+
+    def init_model_and_shard(self, cf, run_id_contd, mini_epoch_contd, devices):
+        sources_size = self.dataset.get_sources_size()
+        targets_num_channels = self.dataset.get_targets_num_channels()
+        targets_coords_size = self.dataset.get_targets_coords_size()
+
+        if cf.with_ddp and cf.with_fsdp:
+            with torch.device("meta"):
+                model = Model(cf, sources_size, targets_num_channels, targets_coords_size).create()
+        else:
+            model = Model(cf, sources_size, targets_num_channels, targets_coords_size).create()
+            model = model.to("cuda")
+
+        # freeze the requested model parts
+        for name, module in model.named_modules():
+            name = module.name if hasattr(module, "name") else name
+            # skip the whole-model element, which has name ''
+            if name == "":
+                continue
+            if re.fullmatch(self.freeze_modules, name) is not None:
+                freeze_weights(module)
+
+        if cf.with_ddp and not cf.with_fsdp:
+            # create DDP model if running without FSDP
+            model = torch.nn.parallel.DistributedDataParallel(
+                model,
+                broadcast_buffers=True,
+                find_unused_parameters=True,
+                gradient_as_bucket_view=True,
+                bucket_cap_mb=512,
+            )
+
+        elif cf.with_ddp and cf.with_fsdp:
+            # with DDP *and* FSDP
+            fsdp_kwargs = {
+                "mp_policy": (
+                    MixedPrecisionPolicy(
+                        param_dtype=self.mixed_precision_dtype,
+                        reduce_dtype=torch.float32,
+                    )
+                    if cf.with_mixed_precision
+                    else None
+                ),
+            }
+            modules_to_shard = (
+                MLP,
+                MultiSelfAttentionHeadLocal,
+                MultiSelfAttentionHead,
+                MultiCrossAttentionHeadVarlen,
+                MultiCrossAttentionHeadVarlenSlicedQ,
+                MultiSelfAttentionHeadVarlen,
+            )
+
+            for module in model.ae_local_engine.ae_local_blocks.modules():
+                if isinstance(module, modules_to_shard):
+                    fully_shard(module, **fsdp_kwargs)
+
+            for module in model.ae_local_global_engine.ae_adapter.modules():
+                if isinstance(module, modules_to_shard):
+                    fully_shard(module, **fsdp_kwargs)
+
+            for module in model.ae_global_engine.ae_global_blocks.modules():
+                if isinstance(module, modules_to_shard):
+                    fully_shard(module, **fsdp_kwargs)
+
+            for module in model.forecast_engine.fe_blocks.modules():
+                if isinstance(module, modules_to_shard):
+                    fully_shard(module, **fsdp_kwargs)
+
+            full_precision_fsdp_kwargs = {
+                "mp_policy": (
+                    MixedPrecisionPolicy(
+                        param_dtype=torch.float32,
+                        reduce_dtype=torch.float32,
+                    )
+                    if cf.with_mixed_precision
+                    else None
+                ),
+            }
+            for module in model.pred_adapter_kv.modules():
+                if isinstance(module, modules_to_shard):
+                    fully_shard(module, **full_precision_fsdp_kwargs)
+
+            for module in model.target_token_engines.modules():
+                if isinstance(module, modules_to_shard):
+                    fully_shard(module, **full_precision_fsdp_kwargs)
+
+        model_params = ModelParams(cf).create(cf)
+
+        if cf.with_ddp and cf.with_fsdp:
+            fully_shard(model)
+            for tensor in itertools.chain(model.parameters(), model.buffers()):
+                assert tensor.device == torch.device("meta")
+
+            # For reasons we do not yet fully understand, FSDP2 in some instances does not
+            # register the forward_channels and forward_columns functions in the embedding
+            # engine as forward functions when continuing a run. This yields a crash because
+            # the input tensors are not converted to DTensors; it seems to occur primarily
+            # during validation.
+            for embed in model.embed_engine.embeds:
+                torch.distributed.fsdp.register_fsdp_forward_method(embed, "forward_channels")
+                torch.distributed.fsdp.register_fsdp_forward_method(embed, "forward_columns")
+
+        # complete initialization and load the model when running inference or continuing a run
+        if run_id_contd is None:
+            if cf.with_ddp and cf.with_fsdp:
+                model.to_empty(device="cuda")
+            if cf.with_fsdp:
+                model.reset_parameters()
+        else:
+            if is_root():
+                logger.info(
+                    f"Continuing run with id={run_id_contd} at mini_epoch {mini_epoch_contd}."
+                )
+            model = self.load_model(model, run_id_contd, mini_epoch_contd)
+
+        model_params.reset_parameters(cf)
+        model_params = model_params.to(self.device)
+
+        return model, model_params
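The meta-device flow in init_model_and_shard above, reduced to a sketch. It assumes torch's FSDP2 fully_shard API (the exact import path varies across torch versions), an already-initialized process group, and made-up module names:

    import torch
    import torch.nn as nn
    from torch.distributed.fsdp import fully_shard  # FSDP2; path varies by torch version

    # Build on the meta device so no real memory is allocated yet.
    with torch.device("meta"):
        model = nn.Sequential(*[nn.Linear(16, 16) for _ in range(4)])

    for block in model:
        fully_shard(block)      # parameters become sharded DTensors
    fully_shard(model)          # root wrap

    model.to_empty(device="cuda")  # materialize only the local shards
    # fresh run: initialize weights; continued run: load a checkpoint instead
    for m in model.modules():
        if isinstance(m, nn.Linear):
            m.reset_parameters()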
+    def inference(self, cf, devices, run_id_contd, mini_epoch_contd):
+        # general initialization
+        self.init(cf, devices)
+
+        cf = self.cf
+        self.device_type = torch.accelerator.current_accelerator()
+        self.device = torch.device(f"{self.device_type}:{cf.local_rank}")
+        self.ema_model = None
+
+        # create data loader
+        # only one is needed since we only run the validation code path
+        self.dataset = MultiStreamDataSampler(
+            cf,
+            cf.start_date_val,
+            cf.end_date_val,
+            cf.batch_size_validation_per_gpu,
+            cf.samples_per_validation,
+            stage=VAL,
+            shuffle=cf.shuffle,
+        )
+        self.dataset_val = self.dataset
+
+        # make sure the number of loader workers does not exceed the number of requested samples
+        loader_num_workers = min(cf.samples_per_validation, cf.loader_num_workers)
+        loader_params = {
+            "batch_size": None,
+            "batch_sampler": None,
+            "shuffle": False,
+            "num_workers": loader_num_workers,
+            "pin_memory": True,
+        }
+        self.data_loader_validation = torch.utils.data.DataLoader(
+            self.dataset, **loader_params, sampler=None
+        )
+
+        self.model, self.model_params = self.init_model_and_shard(
+            cf, run_id_contd, mini_epoch_contd, devices
+        )
+
+        self.loss_calculator_val = LossCalculator(cf=cf, stage=VAL, device=self.devices[0])
+
+        if is_root():
+            config.save(self.cf, mini_epoch=0)
+
+        logger.info(f"Starting inference with id={self.cf.run_id}.")
+
+        # run inference on the validation set
+        self.validate(mini_epoch=0)
+        logger.info(f"Finished inference run with id: {cf.run_id}")
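The data loaders here are created with batch_size=None because MultiStreamDataSampler yields fully assembled batches itself, so the loader must not batch again. A small sketch of that pattern, with a toy stand-in dataset:

    import torch

    class PreBatchedStream(torch.utils.data.IterableDataset):
        """Stands in for MultiStreamDataSampler: yields ready-made batches."""
        def __iter__(self):
            for _ in range(8):
                yield torch.randn(4, 16)  # one pre-assembled batch per item

    # batch_size=None disables automatic batching, so each yielded item passes
    # through the loader unchanged; workers only add prefetching/pinning.
    loader = torch.utils.data.DataLoader(
        PreBatchedStream(), batch_size=None, num_workers=2, pin_memory=True
    )
    for batch in loader:
        assert batch.shape == (4, 16)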
+    def run(self, cf, devices, run_id_contd=None, mini_epoch_contd=None):
+        # general initialization
+        self.init(cf, devices)
+        cf = self.cf
+
+        # TODO: do not define new members outside of the init!!
+        self.device_type = torch.accelerator.current_accelerator()
+        self.device = torch.device(f"{self.device_type}:{cf.local_rank}")
+
+        # create data loaders
+        self.dataset = MultiStreamDataSampler(
+            cf,
+            cf.start_date,
+            cf.end_date,
+            cf.batch_size_per_gpu,
+            cf.samples_per_mini_epoch,
+            stage=TRAIN,
+            shuffle=cf.shuffle,
+        )
+        self.dataset_val = MultiStreamDataSampler(
+            cf,
+            cf.start_date_val,
+            cf.end_date_val,
+            cf.batch_size_validation_per_gpu,
+            cf.samples_per_validation,
+            stage=VAL,
+            shuffle=True,
+        )
+
+        loader_params = {
+            "batch_size": None,
+            "batch_sampler": None,
+            "shuffle": False,
+            "num_workers": cf.loader_num_workers,
+            "pin_memory": True,
+        }
+        self.data_loader = torch.utils.data.DataLoader(self.dataset, **loader_params, sampler=None)
+        self.data_loader_validation = torch.utils.data.DataLoader(
+            self.dataset_val, **loader_params, sampler=None
+        )
+
+        self.model, self.model_params = self.init_model_and_shard(
+            cf, run_id_contd, mini_epoch_contd, devices
+        )
+
+        if cf.compile_model:
+            self.model = torch.compile(self.model, dynamic=True)
+
+        self.validate_with_ema = cf.get("validate_with_ema", False)
+        self.ema_model = None
+        if self.validate_with_ema:
+            meta_ema = self.init_model_and_shard(cf, run_id_contd, mini_epoch_contd, devices)[0]
+            self.ema_model = EMAModel(
+                self.model,
+                meta_ema,
+                halflife_steps=cf.get("ema_halflife_in_thousands", 1e-3),
+                rampup_ratio=cf.get("ema_ramp_up_ratio", 0.09),
+                is_model_sharded=(cf.with_ddp and cf.with_fsdp),
+            )
+
+        # if with_fsdp then the parameter count is unreliable
+        if is_root() and not cf.with_fsdp and not cf.with_ddp:
+            self.model.print_num_parameters()
+
+        # https://www.cs.princeton.edu/~smalladi/blog/2024/01/22/SDEs-ScalingRules/
+        # aiming for beta1=0.9 and beta2=0.95 following the MAE paper https://arxiv.org/pdf/2111.06377
+        kappa = (
+            cf.batch_size_per_gpu * cf.world_size
+        )  # I doubt this holds for us from some anecdotal runs
+        beta1 = max(
+            0.5, 1.0 - kappa * (1.0 - 0.975)
+        )  # aiming for beta1 = 0.9 at one node, i.e. kappa=B=4
+        beta2 = 1.0 - kappa * (1.0 - 0.9875)  # aiming for beta2 = 0.95 at one node, i.e. B=4
+        eps = 2e-08 / np.sqrt(kappa)
+
+        self.optimizer = torch.optim.AdamW(
+            self.model.parameters(),
+            lr=cf.lr_start,
+            weight_decay=cf.weight_decay,
+            betas=(beta1, beta2),
+            eps=eps,
+        )
+        self.grad_scaler = torch.amp.GradScaler("cuda")
+
+        assert len(self.dataset) > 0, f"No data found in {self.dataset}"
+
+        # lr is updated after each batch, so account for this
+        # TODO: conf should be read-only, do not modify the conf in flight
+        cf.lr_steps = int((len(self.dataset) * cf.num_mini_epochs) / cf.batch_size_per_gpu)
+
+        steps_decay = cf.lr_steps - cf.lr_steps_warmup - cf.lr_steps_cooldown
+        if is_root():
+            logger.debug(f"steps_decay={steps_decay} lr_steps={cf.lr_steps}")
+        # ensure that steps_decay has a reasonable value
+        if steps_decay < int(0.2 * cf.lr_steps):
+            cf.lr_steps_warmup = int(0.1 * cf.lr_steps)
+            cf.lr_steps_cooldown = int(0.05 * cf.lr_steps)
+            steps_decay = cf.lr_steps - cf.lr_steps_warmup - cf.lr_steps_cooldown
+            s = (
+                "cf.lr_steps_warmup and cf.lr_steps_cooldown were larger than "
+                f"cf.lr_steps={cf.lr_steps}. The values have been adjusted to "
+                f"cf.lr_steps_warmup={cf.lr_steps_warmup} and "
+                f"cf.lr_steps_cooldown={cf.lr_steps_cooldown} so that steps_decay={steps_decay}."
+            )
+            if is_root():
+                logger.warning(s)
+        self.lr_scheduler = LearningRateScheduler(
+            self.optimizer,
+            cf.batch_size_per_gpu,
+            cf.world_size,
+            cf.lr_start,
+            cf.lr_max,
+            cf.lr_final_decay,
+            cf.lr_final,
+            cf.lr_steps_warmup,
+            steps_decay,
+            cf.lr_steps_cooldown,
+            cf.lr_policy_warmup,
+            cf.lr_policy_decay,
+            cf.lr_policy_cooldown,
+            cf.istep,
+            cf.lr_scaling_policy,
+        )
+
+        if self.cf.istep > 0 and is_root():
+            logger.info(f"Continuing run with learning rate: {self.lr_scheduler.get_lr()}")
+
+        # Instantiate loss calculator modules to compute losses
+        self.loss_calculator = LossCalculator(cf=cf, stage=TRAIN, device=self.device)
+        self.loss_calculator_val = LossCalculator(cf=cf, stage=VAL, device=self.device)
+
+        # recover the mini_epoch when continuing a run
+        if self.world_size_original is None:
+            mini_epoch_base = int(self.cf.istep / len(self.data_loader))
+        else:
+            len_per_rank = (
+                len(self.dataset) // (self.world_size_original * cf.batch_size_per_gpu)
+            ) * cf.batch_size_per_gpu
+            mini_epoch_base = int(
+                self.cf.istep
+                / (min(len_per_rank, cf.samples_per_mini_epoch) * self.world_size_original)
+            )
+
+        # torch.autograd.set_detect_anomaly(True)
+        if cf.forecast_policy is not None:
+            torch._dynamo.config.optimize_ddp = False
+
+        if is_root():
+            config.save(self.cf, None)
+            logger.info(config.format_cf(self.cf))
+
+        # training loop
+
+        # validate once at the beginning as reference
+        if cf.val_initial:
+            self.validate(-1)
+
+        for mini_epoch in range(mini_epoch_base, cf.num_mini_epochs):
+            logger.info(f"Mini_epoch {mini_epoch} of {cf.num_mini_epochs}: train.")
+            self.train(mini_epoch)
+
+            logger.info(f"Mini_epoch {mini_epoch} of {cf.num_mini_epochs}: validate.")
+            self.validate(mini_epoch)
+
+            logger.info(f"Mini_epoch {mini_epoch} of {cf.num_mini_epochs}: save_model.")
+            self.save_model(mini_epoch)
+
+        # log final model
+        self.save_model(cf.num_mini_epochs)
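As a worked example of the scaling rule above, for the single-node configuration the inline comments aim for (assumed batch_size_per_gpu=4, world_size=1):

    # kappa scales the effective batch; the betas move away from 1 as kappa grows.
    kappa = 4 * 1
    beta1 = max(0.5, 1.0 - kappa * (1.0 - 0.975))  # = 0.9
    beta2 = 1.0 - kappa * (1.0 - 0.9875)           # = 0.95
    eps = 2e-08 / kappa**0.5                       # = 1e-08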
+    ###########################################
+    def _prepare_logging(
+        self,
+        preds: list[list[Tensor]],
+        forecast_offset: int,
+        forecast_steps: int,
+        streams_data: list[list[Any]],
+    ):
+        """Collects and denormalizes prediction and target data for logging.
+
+        This function processes target and prediction tensors, extracts relevant
+        coordinates and timestamps, denormalizes the data, and organizes it
+        into a structured format suitable for logging or further analysis. It
+        handles potential empty tensors and NaN values.
+
+        Args:
+            preds: A list of lists, where the outer list corresponds to forecast
+                steps and the inner list contains the prediction tensor for each
+                observation stream. Each prediction tensor is expected to be in
+                the normalized latent or observation space, depending on the
+                model's output.
+            forecast_offset: The starting offset for the forecast steps
+                relative to the original data.
+            forecast_steps: The number of forecast steps to consider.
+            streams_data: A list of lists, where each inner list contains data
+                objects (e.g., `StreamData` instances) for each stream at a
+                specific time step. These objects are expected to have
+                `target_coords_raw` and `target_times_raw` attributes.
+
+        Returns:
+            tuple: A tuple containing:
+                - preds_all: Denormalized predictions, organized by forecast
+                  step and observation stream.
+                - targets_all: Denormalized targets, organized by forecast step
+                  and observation stream.
+                - targets_coords_raw: Raw target coordinates, extracted and
+                  concatenated for each forecast step and stream.
+                - targets_times_raw: Raw target timestamps, extracted and
+                  concatenated for each forecast step and stream.
+                - targets_lens: A list of lists, where each inner list contains
+                  the original lengths (shape[0]) of the target tensors before
+                  any filtering.
+        """
+
+        # handle the case when forecast_steps is a list
+        if type(forecast_steps) is omegaconf.listconfig.ListConfig:
+            forecast_range = np.array(forecast_steps)
+        else:
+            forecast_range = np.arange(forecast_offset, forecast_offset + forecast_steps + 1)
+
+        # TODO: Remove this function and port functionality to write_validation(), which then
+        # extracts preds_all, targets_all, ... itself directly from streams_data.
+        # TODO: Undo list resorting
+        # The following list operations realize a reshaping of the original tensors in streams_data
+        # from shape [batch_sample][stream][fstep] into shape [fstep][stream][batch_sample]. When
+        # removing the reshaping, make sure to index the tensors starting at forecast_offset, e.g.,
+        # target_times_raw = streams_data[i_batch][i_strm].target_times_raw[forecast_offset+fstep],
+        # when iterating over batch, stream, and fsteps.
+        targets_rt = [
+            [
+                torch.cat([t[i].target_tokens[fstep] for t in streams_data])
+                for i in range(len(self.cf.streams))
+            ]
+            for fstep in forecast_range
+        ]
+        # TODO: Undo list resorting
+        targets_coords_raw = [
+            [
+                torch.cat([t[i].target_coords_raw[fstep] for t in streams_data])
+                for i in range(len(self.cf.streams))
+            ]
+            for fstep in forecast_range
+        ]
+        # TODO: Undo list resorting
+        targets_times_raw = [
+            [
+                np.concatenate([t[i].target_times_raw[fstep] for t in streams_data])
+                for i in range(len(self.cf.streams))
+            ]
+            for fstep in forecast_range
+        ]
+
+        # assert len(targets_rt) == len(preds) and len(preds) == len(self.cf.streams)
+        fsteps = len(targets_rt)
+        preds_all: list[list[list[NDArray]]] = [
+            [[] for _ in self.cf.streams] for _ in range(fsteps)
+        ]
+        targets_all: list[list[list[NDArray]]] = [
+            [[] for _ in self.cf.streams] for _ in range(fsteps)
+        ]
+        targets_lens: list[list[list[int]]] = [[[] for _ in self.cf.streams] for _ in range(fsteps)]
+
+        # TODO: iterate over batches here in future, and change loop order to batch, stream, fstep
+        for fstep in range(len(targets_rt)):
+            if len(preds[fstep]) == 0:
+                continue
+
+            for i_strm, target in enumerate(targets_rt[fstep]):
+                pred = preds[fstep][i_strm]
+
+                if not (target.shape[0] > 0 and pred.shape[0] > 0):
+                    continue
+
+                # extract data/coords and remove the token dimension if it exists
+                pred = pred.reshape([pred.shape[0], *target.shape])
+                assert pred.shape[1] > 0
+
+                mask_nan = ~torch.isnan(target)
+                if pred[:, mask_nan].shape[1] == 0:
+                    continue
+
+                targets_lens[fstep][i_strm] += [target.shape[0]]
+                dn_data = self.dataset_val.denormalize_target_channels
+
+                f32 = torch.float32
+                preds_all[fstep][i_strm] += [
+                    np.asarray(dn_data(i_strm, pred.to(f32)).detach().cpu())
+                ]
+                targets_all[fstep][i_strm] += [
+                    np.asarray(dn_data(i_strm, target.to(f32)).detach().cpu())
+                ]
+
+        return (
+            preds_all,
+            targets_all,
+            targets_coords_raw,
+            targets_times_raw,
+            targets_lens,
+        )
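The list resorting in _prepare_logging is easiest to see on toy data; a sketch of the [batch][stream][fstep] -> [fstep][stream][batch] transpose (entries here are strings instead of tensors):

    data = [[["b0s0f0", "b0s0f1"], ["b0s1f0", "b0s1f1"]],
            [["b1s0f0", "b1s0f1"], ["b1s1f0", "b1s1f1"]]]  # [batch][stream][fstep]
    by_fstep = [[[b[s][f] for b in data] for s in range(2)] for f in range(2)]
    # by_fstep[f][s] collects the per-batch entries for forecast step f, stream s
    assert by_fstep[1][0] == ["b0s0f1", "b1s0f1"]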
+    def train(self, mini_epoch):
+        cf = self.cf
+        self.model.train()
+        # torch.autograd.set_detect_anomaly(True)
+
+        dataset_iter = iter(self.data_loader)
+
+        self.optimizer.zero_grad()
+
+        # Unweighted loss, real weighted loss, std for losses that need it
+        self.loss_unweighted_hist, self.loss_model_hist, self.stdev_unweighted_hist = [], [], []
+
+        # training loop
+        self.t_start = time.time()
+        for bidx, batch in enumerate(dataset_iter):
+            forecast_steps = batch[-1]
+            batch = self.batch_to_device(batch)
+
+            # evaluate model
+            with torch.autocast(
+                device_type="cuda",
+                dtype=self.mixed_precision_dtype,
+                enabled=cf.with_mixed_precision,
+            ):
+                preds, posteriors = self.model(
+                    self.model_params, batch, cf.forecast_offset, forecast_steps
+                )
+                loss_values = self.loss_calculator.compute_loss(
+                    preds=preds,
+                    streams_data=batch[0],
+                )
+                if cf.latent_noise_kl_weight > 0.0:
+                    kl = torch.cat([posterior.kl() for posterior in posteriors])
+                    loss_values.loss += cf.latent_noise_kl_weight * kl.mean()
+
+            # backward pass
+            self.optimizer.zero_grad()
+            self.grad_scaler.scale(loss_values.loss).backward()
+            # loss_values.loss.backward()
+
+            # gradient clipping
+            self.grad_scaler.unscale_(self.optimizer)
+            total_norm = torch.nn.utils.clip_grad_norm_(
+                self.model.parameters(), max_norm=cf.grad_clip
+            )
+
+            # log gradient norms
+            if self.log_grad_norms:
+                if bidx % self.train_log_freq.terminal == 0:
+                    self.last_grad_norm = self._get_tensor_item(total_norm)
+                if bidx % self.train_log_freq.metrics == 0:
+                    self._log_instant_grad_norms(TRAIN)
+
+            # optimizer step
+            self.grad_scaler.step(self.optimizer)
+            self.grad_scaler.update()
+            # self.optimizer.step()
+
+            # update learning rate
+            self.lr_scheduler.step()
+
+            # EMA update
+            if self.validate_with_ema:
+                self.ema_model.update(
+                    self.cf.istep * self.world_size_original * self.cf.batch_size_per_gpu,
+                    self.world_size_original * self.cf.batch_size_per_gpu,
+                )
+
+            self.loss_unweighted_hist += [loss_values.losses_all]
+            self.loss_model_hist += [loss_values.loss.item()]
+            self.stdev_unweighted_hist += [loss_values.stddev_all]
+
+            perf_gpu, perf_mem = self.get_perf()
+            self.perf_gpu = ddp_average(torch.tensor([perf_gpu], device=self.device)).item()
+            self.perf_mem = ddp_average(torch.tensor([perf_mem], device=self.device)).item()
+
+            self._log_terminal(bidx, mini_epoch, TRAIN)
+            if bidx % self.train_log_freq.metrics == 0:
+                self._log(TRAIN)
+
+            # save model checkpoint (with designation _latest)
+            if bidx % self.train_log_freq.checkpoint == 0 and bidx > 0:
+                self.save_model(-1)
+
+            self.cf.istep += 1
+
+        self.dataset.advance()
+
+    def validate(self, mini_epoch):
+        cf = self.cf
+        self.model.eval()
+
+        dataset_val_iter = iter(self.data_loader_validation)
+        self.loss_unweighted_hist, self.loss_model_hist, self.stdev_unweighted_hist = [], [], []
+
+        with torch.no_grad():
+            # print a progress bar, but only in interactive mode, i.e. when running without DDP
+            with tqdm.tqdm(
+                total=len(self.data_loader_validation), disable=self.cf.with_ddp
+            ) as pbar:
+                for bidx, batch in enumerate(dataset_val_iter):
+                    forecast_steps = batch[-1]
+                    batch = self.batch_to_device(batch)
+
+                    # evaluate model
+                    with torch.autocast(
+                        device_type="cuda",
+                        dtype=self.mixed_precision_dtype,
+                        enabled=cf.with_mixed_precision,
+                    ):
+                        model_forward = (
+                            self.model.forward
+                            if self.ema_model is None
+                            else self.ema_model.forward_eval
+                        )
+                        preds, _ = model_forward(
+                            self.model_params, batch, cf.forecast_offset, forecast_steps
+                        )
+
+                    streams_data: list[list[StreamData]] = batch[0]
+                    # compute loss and log output
+                    if bidx < cf.log_validation:
+                        loss_values = self.loss_calculator_val.compute_loss(
+                            preds=preds,
+                            streams_data=streams_data,
+                        )
+
+                        # TODO: Move _prepare_logging into write_validation by passing streams_data
+                        (
+                            preds_all,
+                            targets_all,
+                            targets_coords_all,
+                            targets_times_all,
+                            targets_lens,
+                        ) = self._prepare_logging(
+                            preds=preds,
+                            forecast_offset=cf.forecast_offset,
+                            forecast_steps=cf.forecast_steps,
+                            streams_data=streams_data,
+                        )
+                        sources = [[item.source_raw for item in stream] for stream in streams_data]
+                        # the sample idx should be the same across streams => select the first
+                        sample_idxs = [item.sample_idx for item in streams_data[0]]
+                        write_output(
+                            self.cf,
+                            mini_epoch,
+                            bidx,
+                            sources,
+                            preds_all,
+                            targets_all,
+                            targets_coords_all,
+                            targets_times_all,
+                            targets_lens,
+                            sample_idxs,
+                        )
+
+                    else:
+                        loss_values = self.loss_calculator_val.compute_loss(
+                            preds=preds,
+                            streams_data=streams_data,
+                        )
+
+                    self.loss_unweighted_hist += [loss_values.losses_all]
+                    self.loss_model_hist += [loss_values.loss.item()]
+                    self.stdev_unweighted_hist += [loss_values.stddev_all]
+
+                    pbar.update(self.cf.batch_size_validation_per_gpu)
+
+        self._log_terminal(bidx, mini_epoch, VAL)
+        self._log(VAL)
+
+        # avoid a systematic bias in the validation subset
+        self.dataset_val.advance()
+
+    def batch_to_device(self, batch):
+        # TODO: do not define new members outside of the init!!
+        self.device_type = torch.accelerator.current_accelerator()
+        self.device = torch.device(f"{self.device_type}:{self.cf.local_rank}")
+        # forecast_steps is dropped here from the batch
+        return (
+            [[d.to_device(self.device) for d in db] for db in batch[0]],
+            batch[1].to(self.device),
+            [[b.to(self.device) for b in bf] for bf in batch[2]],
+        )
+    def load_model(self, model, run_id: str, mini_epoch=-1):
+        """Loads model state from a checkpoint and checks for missing and unused keys.
+
+        Args:
+            run_id : model_id of the trained model
+            mini_epoch : The mini_epoch to load. Default (-1) is the latest mini_epoch
+        """
+
+        path_run = Path(self.cf.model_path) / run_id
+        mini_epoch_id = (
+            f"chkpt{mini_epoch:05d}" if mini_epoch != -1 and mini_epoch is not None else "latest"
+        )
+        filename = f"{run_id}_{mini_epoch_id}.chkpt"
+
+        if not (path_run / filename).exists():
+            mini_epoch_id = f"epoch{mini_epoch:05d}"
+            filename = f"{run_id}_{mini_epoch_id}.chkpt"
+
+        params = torch.load(
+            path_run / filename, map_location=torch.device("cpu"), mmap=True, weights_only=True
+        )
+
+        is_model_sharded = self.cf.with_ddp and self.cf.with_fsdp
+        if is_model_sharded:
+            meta_sharded_sd = model.state_dict()
+            maybe_sharded_sd = {}
+            for param_name, full_tensor in params.items():
+                sharded_meta_param = meta_sharded_sd.get(param_name)
+                sharded_tensor = distribute_tensor(
+                    full_tensor,
+                    sharded_meta_param.device_mesh,
+                    sharded_meta_param.placements,
+                )
+                # maybe_sharded_sd[param_name.replace("module.", "")] = nn.Parameter(sharded_tensor)
+                maybe_sharded_sd[param_name] = nn.Parameter(sharded_tensor)
+            # choose `assign=True` for the sharded model since we cannot call `copy_` on meta tensors
+            mkeys, ukeys = model.load_state_dict(maybe_sharded_sd, strict=False, assign=True)
+
+            # new network parts (e.g. for fine-tuning)
+            if mkeys:
+                # Get the unique parent modules for the missing parameters
+                new_modules_to_init = {key.rsplit(".", 1)[0] for key in mkeys}
+
+                # Find the highest-level "root" new modules to avoid redundant initializations
+                root_new_modules = set()
+                for path in sorted(list(new_modules_to_init)):
+                    if not any(path.startswith(root + ".") for root in root_new_modules):
+                        root_new_modules.add(path)
+
+                # Get all modules for quick lookup and initialize the new ones
+                all_modules = dict(model.named_modules())
+                for path in root_new_modules:
+                    if is_root():
+                        logger.info(f"Initializing new module not found in checkpoint: {path}")
+                    module_to_init = all_modules[path]
+                    module_to_init.to_empty(device="cuda")
+                    module_to_init.reset_parameters()
+
+        else:
+            if not self.cf.with_ddp:
+                params_temp = {}
+                for k in params.keys():
+                    params_temp[k.replace("module.", "")] = params[k]
+                params = params_temp
+            mkeys, ukeys = model.load_state_dict(params, strict=False)
+            model = model.to(self.device)
+
+        # warn about differences between checkpoint and model
+        if len(mkeys) == 0 and len(ukeys) == 0:
+            logger.info(f"Checkpoint {filename} loaded successfully with all weights matching.")
+        if len(mkeys) > 0:
+            logger.warning(f"Missing keys when loading model: {mkeys}")
+        if len(ukeys) > 0:
+            logger.warning(f"Unused keys when loading model: {ukeys}")
+
+        return model
+
+    def _get_full_model_state_dict(self):
+        maybe_sharded_sd = (
+            self.model.state_dict() if self.ema_model is None else self.ema_model.state_dict()
+        )
+        if self.cf.with_ddp and self.cf.with_fsdp:
+            cpu_state_dict = {}
+            for param_name, sharded_param in maybe_sharded_sd.items():
+                full_param = sharded_param.full_tensor()
+                if is_root():
+                    cpu_state_dict[param_name] = full_param.cpu()
+                else:
+                    del full_param
+            return cpu_state_dict
+        else:
+            return maybe_sharded_sd
+    def _get_full_optimizer_state_dict(self):
+        is_rank_zero = is_root()
+        sharded_sd = self.optimizer.state_dict()
+        sharded_state = sharded_sd["state"]
+        full_state = {}
+        for group_id, sharded_group in sharded_state.items():
+            group_state = {}
+            for attr, sharded_tensor in sharded_group.items():
+                if isinstance(sharded_tensor, DTensor):
+                    # "exp_avg" in AdamW is a `DTensor`
+                    full_tensor = sharded_tensor.full_tensor()
+                else:
+                    # "step" in AdamW is a plain tensor
+                    full_tensor = sharded_tensor
+                if is_rank_zero:
+                    group_state[attr] = full_tensor.cpu()
+                else:
+                    del full_tensor
+            if is_rank_zero:
+                full_state[group_id] = group_state
+            else:
+                del group_state
+        if is_rank_zero:
+            return {
+                "param_groups": sharded_sd["param_groups"],
+                "state": full_state,
+            }
+        else:
+            return {}
+
+    def save_model(self, mini_epoch: int, name=None):
+        # Saving at mini_epoch == max_mini_epoch means that we are saving the latest checkpoint.
+        max_mini_epoch = self.cf.num_mini_epochs
+        assert mini_epoch <= max_mini_epoch, (mini_epoch, max_mini_epoch)
+        model_state_dict = self._get_full_model_state_dict()
+
+        if is_root():
+            filename = "".join(
+                [
+                    self.cf.run_id,
+                    "_",
+                    "latest" if mini_epoch == -1 else f"chkpt{mini_epoch:05d}",
+                    ("_" + name) if name is not None else "",
+                ]
+            )
+            base_path = config.get_path_model(self.cf)
+            file_out = base_path / (filename + ".chkpt")
+            file_tmp = base_path / (filename + "_tmp.chkpt")
+            # save to a temp file first (slow)
+            torch.save(model_state_dict, file_tmp)
+            # move the file, which only changes the link in the file system and is very fast
+            file_tmp.replace(file_out)
+            logger.info(f"Saved model to {file_out}")
+
+            # save config
+            config.save(self.cf, mini_epoch)
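save_model above relies on the write-then-rename idiom to keep checkpoints consistent; distilled into a sketch (the helper name is hypothetical):

    from pathlib import Path
    import torch

    def atomic_save(state_dict, path: Path) -> None:
        # Write the (slow) torch.save to a sibling temp file, then rename it over
        # the final name; the rename is atomic on POSIX filesystems, so a crash
        # mid-save never leaves a truncated checkpoint behind.
        tmp = path.with_name(path.name + "_tmp")
        torch.save(state_dict, tmp)
        tmp.replace(path)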
+ """ + avg_loss, losses_all, stddev_all = self._prepare_losses_for_logging() + samples = self.cf.istep * self.cf.batch_size_per_gpu * self.cf.world_size + + if is_root(): + # plain logger + if stage == VAL: + self.train_logger.add_val(samples, losses_all, stddev_all) + + elif self.cf.istep >= 0: + self.train_logger.add_train( + samples, + self.lr_scheduler.get_lr(), + avg_loss, + losses_all, + stddev_all, + self.perf_gpu, + self.perf_mem, + ) + + self.loss_unweighted_hist, self.loss_model_hist, self.stdev_unweighted_hist = [], [], [] + + def _get_tensor_item(self, tensor): + """ + When using FSDP2, tensor is a DTensor and we need full_tensor().item() instead of .item(), + see here: https://gist.github.com/Kai-46/a9835ef3f36e76d06afee6c11f388144 + """ + return tensor.full_tensor().item() if isinstance(tensor, DTensor) else tensor.item() + + def _log_instant_grad_norms(self, stage: Stage): + """ + Log instantaneous grad norms, we do not average because of the cost and because we want to + measure the actual values. + """ + grad_norms = {"grad_norm.total": self.last_grad_norm} + for name, param in self.model.named_parameters(): + if param.grad is not None: + grad_norms["grad_norm." + name] = self._get_tensor_item(param.grad.norm()) + + if is_root(): + self.train_logger.log_metrics(stage, grad_norms) + + def _log_terminal(self, bidx: int, mini_epoch: int, stage: Stage): + print_freq = self.train_log_freq.terminal + if bidx % print_freq == 0 and bidx > 0 or stage == VAL: + # compute from last iteration + avg_loss, losses_all, _ = self._prepare_losses_for_logging() + + if is_root(): + if stage == VAL: + logger.info( + f"""validation ({self.cf.run_id}) : {mini_epoch:03d} : + {avg_loss.nanmean().item()}""" + ) + for _, st in enumerate(self.cf.streams): + logger.info( + "{}".format(st["name"]) + + f" : {losses_all[st['name']].nanmean():0.4E} \t", + ) + logger.info("\n") + + elif stage == TRAIN: + # samples per sec + dt = time.time() - self.t_start + len_dataset = len(self.data_loader) // self.cf.batch_size_per_gpu + pstr = ( + f"{mini_epoch:03d} : {bidx:05d}/{len_dataset:05d} : " + + f"{self.cf.istep:06d} : loss = {avg_loss.nanmean().item():.4E} " + + f"(lr={self.lr_scheduler.get_lr():.2E}, " + ) + if self.log_grad_norms: + pstr += f"gradient norm={self.last_grad_norm:.3f}, " + pstr += f"s/sec={(print_freq * self.cf.batch_size_per_gpu) / dt:.3f})" + logger.info(pstr) + logger.info("\t") + for _, st in enumerate(self.cf.streams): + logger.info( + "{}".format(st["name"]) + + f" : {losses_all[st['name']].nanmean():0.4E} \t", + ) + logger.info("\n") + + self.t_start = time.time() diff --git a/src/weathergen/train/trainer_base.py b/src/weathergen/train/trainer_base.py index 68f583dbd..684b3b54b 100644 --- a/src/weathergen/train/trainer_base.py +++ b/src/weathergen/train/trainer_base.py @@ -1,4 +1,6 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# ruff: noqa: T201 + +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -8,219 +10,160 @@ # nor does it submit to any jurisdiction. 
import os -import datetime -import string -import random -import pathlib -import itertools -import logging -import json -import yaml -import logging -import code - -import numpy as np -import torch -# import mlflow import pynvml - +import torch import torch.distributed as dist -import torch.utils.data.distributed - -from weathergen.utils.config import Config -import weathergen.utils.logger -from weathergen.train.utils import get_run_id, str_to_tensor, tensor_to_str, json_to_dict - - -class Trainer_Base() : - - def __init__( self) : - pass - - ########################################### - @staticmethod - def init_mlflow( cf, rank, run_id_contd = None, run_id_new = False, - project='obs_learn_kas_cell_forecast') : - - if 0 == rank : - - run_id = cf.run_id - - slurm_job_id_node = os.environ.get('SLURM_JOB_ID', '-1') - if slurm_job_id_node != '-1' : - cf.slurm_job_id = slurm_job_id_node - - # check if offline mode is requested through environment variable or in config - mlflow_offline_env = os.environ.get('MLFLOW_OFFLINE', '-1') - if not hasattr( cf, 'mlflow_offline') : - cf.mlflow_offline = True if mlflow_offline_env != '-1' else False - - rs_uri = './mlflow/' if cf.mlflow_offline else 'https://mlflow.ecmwf.int/' - mlflow.set_tracking_uri( rs_uri) - mlflow.set_experiment( project) - - # # we separate the mlflow_id and the run_id/run_name, which is used for all local bookkeeping - # ml_id = None if run_id_contd is None or run_id_new else cf.mlflow_id - # mlflow_run = mlflow.start_run( run_id=ml_id, run_name=run_id, - # log_system_metrics=True) - # cf.mlflow_id = mlflow_run.info.run_id - - # log config (cannot be overwritten so log only at the first start) - if run_id_contd is None or run_id_new : - mlflow.log_params( cf.__dict__) - - if run_id_contd is not None and run_id_new : - str = f'Continuing run {run_id_contd} at step={cf.istep} as run {run_id}.' 
- logging.getLogger('obslearn').info( str) - elif run_id_contd is not None : - logging.getLogger('obslearn').info( f'Continuing run {run_id_contd} at step={cf.istep}.') - - ########################################### - @staticmethod - def init_torch( use_cuda = True, num_accs_per_task = 1) : - - torch.set_printoptions( linewidth=120) - - torch.backends.cuda.matmul.allow_tf32 = True - - use_cuda = torch.cuda.is_available() - if not use_cuda : - return torch.device( 'cpu') - - local_id_node = os.environ.get('SLURM_LOCALID', '-1') - if local_id_node == '-1' : - devices = ['cuda'] - else : - devices = ['cuda:{}'.format(int(local_id_node) * num_accs_per_task + i) - for i in range(num_accs_per_task)] - torch.cuda.set_device( int(local_id_node) * num_accs_per_task ) - - return devices - - ########################################### - @staticmethod - def init_ddp( cf) : - - rank = 0 - num_ranks = 1 - - master_node = os.environ.get('MASTER_ADDR', '-1') - if '-1' == master_node : - cf.with_ddp=False; cf.rank=rank; cf.num_ranks=num_ranks - return - - local_rank = int(os.environ.get("SLURM_LOCALID")) - ranks_per_node = int( os.environ.get('SLURM_TASKS_PER_NODE', '1')[0] ) - rank = int(os.environ.get("SLURM_NODEID")) * ranks_per_node + local_rank - num_ranks = int(os.environ.get("SLURM_NTASKS")) - - dist.init_process_group( backend='nccl', init_method='tcp://' + master_node + ':1345', - timeout=datetime.timedelta(seconds=10*8192), - world_size = num_ranks, rank = rank) - - # communicate run id to all nodes - run_id_int = torch.zeros( 8, dtype=torch.int32).cuda() - if 0 == rank : - run_id_int = str_to_tensor( cf.run_id).cuda() - dist.all_reduce( run_id_int, op=torch.distributed.ReduceOp.SUM ) - cf.run_id = tensor_to_str( run_id_int) - - # communicate data_loader_rng_seed - if hasattr( cf, 'data_loader_rng_seed') : - if cf.data_loader_rng_seed is not None : - l_seed = torch.tensor([cf.data_loader_rng_seed if 0==rank else 0], dtype=torch.int32).cuda() - dist.all_reduce( l_seed, op=torch.distributed.ReduceOp.SUM ) - cf.data_loader_rng_seed = l_seed.item() - - cf.rank = rank - cf.num_ranks = num_ranks - cf.with_ddp = True - - return - - ########################################### - @staticmethod - def init_streams( cf : Config, run_id_contd ) : - - if not hasattr( cf, 'streams_directory'): - return cf - - # use previously specified streams when continuing a run - if run_id_contd is not None : - return cf - - if not hasattr( cf, 'streams'): - cf.streams = [ ] - elif not isinstance( cf.streams, list) : - cf.streams = [ ] - - # warn if specified dir does not exist - if not os.path.isdir( cf.streams_directory) : - sd = cf.streams_directory - logging.getLogger('obslearn').warning( f'Streams directory {sd} does not exist.') - - # read all reportypes from directory, append to existing ones - temp = {} - for fh in sorted( pathlib.Path( cf.streams_directory).rglob( '*.yml')) : - stream_parsed = yaml.safe_load( fh.read_text()) - if stream_parsed is not None : - temp.update( stream_parsed) - for k,v in temp.items() : - v['name'] = k - cf.streams.append( v) - - # sanity checking (at some point, the dict should be parsed into a class) - rts = [ rt['filenames'] for rt in cf.streams] - # flatten list - rts = list( itertools.chain.from_iterable( rts)) - if len(rts) != len( list(set( rts))) : - logging.getLogger('obslearn').warning( 'Duplicate reportypes specified.') - - cf.num_obs_types = 3 - - return cf - - ########################################### - def init_perf_monitoring( self) : - - self.device_handles, 
self.device_names = [], [] - - pynvml.nvmlInit() - device_count = pynvml.nvmlDeviceGetCount() - - for i in range(device_count): - handle = pynvml.nvmlDeviceGetHandleByIndex(i) - self.device_names += [ pynvml.nvmlDeviceGetName(handle) ] - self.device_handles += [ handle ] - - ########################################### - def get_perf( self) : - - perf_gpu, perf_mem = 0.0, 0.0 - if len(self.device_handles) > 0 : - for handle in self.device_handles : - perf = pynvml.nvmlDeviceGetUtilizationRates( handle) - perf_gpu += perf.gpu - perf_mem += perf.memory - perf_gpu /= len(self.device_handles) - perf_mem /= len(self.device_handles) - - return perf_gpu, perf_mem - - ########################################### - def ddp_average( self, val) : - if self.cf.with_ddp : - dist.all_reduce( val.cuda(), op=torch.distributed.ReduceOp.AVG ) - return val.cpu() - -#################################################################################################### -if __name__ == '__main__' : - - from weathergen.utils.config import Config - from weathergen.train.trainer_base import Trainer_Base - - cf = Config() - cf.sources_dir = './sources' - - cf = Trainer_Base.init_reportypes( cf) +import torch.multiprocessing + +from weathergen.common.config import Config +from weathergen.train.utils import str_to_tensor, tensor_to_str +from weathergen.utils.distributed import is_root + +PORT = 1345 + + +class TrainerBase: + def __init__(self): + self.device_handles = [] + self.device_names = [] + self.cf: Config | None = None + + @staticmethod + def init_torch(use_cuda=True, num_accs_per_task=1, multiprocessing_method="fork"): + """ + Initialize torch, set device and multiprocessing method. + + NOTE: If using the Nvidia profiler, + the multiprocessing method must be set to "spawn". + The default for linux systems is "fork", + which prevents traces from being generated with DDP. + """ + torch.set_printoptions(linewidth=120) + + # This strategy is required by the nvidia profiles + # to properly trace events in worker processes. + # This may cause issues with logging. 
Alternative: "fork" + torch.multiprocessing.set_start_method(multiprocessing_method, force=True) + + torch.backends.cuda.matmul.allow_tf32 = True + + use_cuda = torch.cuda.is_available() + if not use_cuda: + return torch.device("cpu") + + local_id_node = os.environ.get("SLURM_LOCALID", "-1") + if local_id_node == "-1": + devices = ["cuda"] + else: + devices = [ + f"cuda:{int(local_id_node) * num_accs_per_task + i}" + for i in range(num_accs_per_task) + ] + torch.cuda.set_device(int(local_id_node) * num_accs_per_task) + + return devices + + @staticmethod + def init_ddp(cf): + """Initializes the distributed environment.""" + rank = 0 + local_rank = 0 + + if not dist.is_available(): + print("Distributed training is not available.") + return + + # dist.set_debug_level(dist.DebugLevel.DETAIL) + world_size = int(os.environ.get("WORLD_SIZE", "-1")) + if world_size == -1: + # Called using SLURM instead of torchrun + world_size = int(os.environ.get("SLURM_NTASKS", "1")) + + if not dist.is_initialized() and world_size > 1: + # These environment variables are typically set by the launch utility + # (e.g., torchrun, Slurm) + local_rank = int(os.environ.get("LOCAL_RANK", "-1")) + if local_rank == -1: + # Called using SLURM instead of torchrun + local_rank = int(os.environ.get("SLURM_LOCALID")) + rank = int(os.environ.get("RANK", "-1")) + if rank == -1: + ranks_per_node = int(os.environ.get("SLURM_TASKS_PER_NODE", "1")[0]) + rank = int(os.environ.get("SLURM_NODEID")) * ranks_per_node + local_rank + master_addr = os.environ.get("MASTER_ADDR", "localhost") + master_port = os.environ.get("MASTER_PORT", f"{PORT}") # Default port + + if torch.accelerator.is_available(): + device_type = torch.accelerator.current_accelerator() + device = torch.device(f"{device_type}:{local_rank}") + torch.accelerator.set_device_index(local_rank) + print(f"DDP initialization: device={device}, rank={rank}, world_size={world_size}") + else: + device = torch.device("cpu") + print(f"Running on device {device}") + + backend = torch.distributed.get_default_backend_for_device(device) + torch.distributed.init_process_group( + backend=backend, + world_size=world_size, + device_id=device, + rank=rank, + init_method=f"tcp://{master_addr}:{master_port}", + ) + print(f"Process group initialized ({backend}).") + + if is_root(): + print("DDP initialized: root.") + # Wait for all ranks to reach this point + + dist.barrier() + # communicate run id to all nodes + len_run_id = len(cf.run_id) + run_id_int = torch.zeros(len_run_id, dtype=torch.int32).to(device) + if is_root(): + print(f"Communicating run_id to all nodes: {cf.run_id}") + run_id_int = str_to_tensor(cf.run_id).to(device) + dist.all_reduce(run_id_int, op=torch.distributed.ReduceOp.SUM) + if not is_root(): + cf.run_id = tensor_to_str(run_id_int) + print(f"rank: {rank} has run_id: {cf.run_id}") + + # communicate data_loader_rng_seed + if hasattr(cf, "data_loader_rng_seed"): + if cf.data_loader_rng_seed is not None: + l_seed = torch.tensor( + [cf.data_loader_rng_seed if rank == 0 else 0], dtype=torch.int32 + ).cuda() + dist.all_reduce(l_seed, op=torch.distributed.ReduceOp.SUM) + cf.data_loader_rng_seed = l_seed.item() + + cf.world_size = world_size + cf.rank = rank + cf.local_rank = local_rank + cf.with_ddp = world_size > 1 + + return cf + + def init_perf_monitoring(self): + self.device_handles, self.device_names = [], [] + + pynvml.nvmlInit() + device_count = pynvml.nvmlDeviceGetCount() + + for i in range(device_count): + handle = pynvml.nvmlDeviceGetHandleByIndex(i) + self.device_names 
+= [pynvml.nvmlDeviceGetName(handle)] + self.device_handles += [handle] + + def get_perf(self): + perf_gpu, perf_mem = 0.0, 0.0 + if len(self.device_handles) > 0: + for handle in self.device_handles: + perf = pynvml.nvmlDeviceGetUtilizationRates(handle) + perf_gpu += perf.gpu + perf_mem += perf.memory + perf_gpu /= len(self.device_handles) + perf_mem /= len(self.device_handles) + + return perf_gpu, perf_mem diff --git a/src/weathergen/train/utils.py b/src/weathergen/train/utils.py index 80a31416f..53c6934de 100644 --- a/src/weathergen/train/utils.py +++ b/src/weathergen/train/utils.py @@ -1,4 +1,4 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -7,22 +7,25 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. -import string +import json + import torch -import random +from weathergen.common import config + +# TODO: remove this definition, it should directly using common. +get_run_id = config.get_run_id -def get_run_id() : - s1 = string.ascii_lowercase - s2 = string.ascii_lowercase + string.digits - return ''.join(random.sample(s1, 1)) + ''.join(random.sample(s2, 7)) def str_to_tensor(modelid): - return torch.tensor([ord(c) for c in modelid], dtype=torch.int32) + return torch.tensor([ord(c) for c in modelid], dtype=torch.int32) + def tensor_to_str(tensor): - return ''.join([chr(x) for x in tensor]) + return "".join([chr(x) for x in tensor]) + -def json_to_dict( fname) : - json_str = open( fname, 'r').readlines() - return json.loads( ''.join([s.replace('\n','') for s in json_str])) +def json_to_dict(fname): + with open(fname) as f: + json_str = f.readlines() + return json.loads("".join([s.replace("\n", "") for s in json_str])) diff --git a/src/weathergen/utils/better_abc.py b/src/weathergen/utils/better_abc.py new file mode 100644 index 000000000..875b1d180 --- /dev/null +++ b/src/weathergen/utils/better_abc.py @@ -0,0 +1,42 @@ +""" +Coding recipe for abstract fields in Python. + +Based on https://stackoverflow.com/questions/23831510/abstract-attribute-not-property + +It is necessary because of our coding style and python's limited support for abstract fields. +This code is a workaround to allow defining abstract attributes +in classes that use the `ABCMeta` metaclass. 
+""" + +from abc import ABCMeta as NativeABCMeta +from collections.abc import Callable +from typing import Any, cast + + +class DummyAttribute: + pass + + +def abstract_attribute[R](obj: Callable[[Any], R] | None = None) -> R: + _obj = cast(Any, obj) + if obj is None: + _obj = DummyAttribute() + _obj.__is_abstract_attribute__ = True + return cast(R, _obj) + + +class ABCMeta(NativeABCMeta): + def __call__(cls, *args, **kwargs): + instance = NativeABCMeta.__call__(cls, *args, **kwargs) + abstract_attributes = { + name + for name in dir(instance) + if hasattr(getattr(instance, name), "__is_abstract_attribute__") + } + if abstract_attributes: + raise NotImplementedError( + "Can't instantiate abstract class {} with abstract attributes: {}".format( + cls.__name__, ", ".join(abstract_attributes) + ) + ) + return instance diff --git a/src/weathergen/utils/cli.py b/src/weathergen/utils/cli.py new file mode 100644 index 000000000..ab816a0e0 --- /dev/null +++ b/src/weathergen/utils/cli.py @@ -0,0 +1,144 @@ +import argparse +from pathlib import Path + +import pandas as pd + + +def get_train_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser(allow_abbrev=False) + _add_general_arguments(parser) + + return parser + + +def get_continue_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser(allow_abbrev=False) + + _add_general_arguments(parser) + _add_model_loading_params(parser) + + parser.add_argument( + "--finetune_forecast", + action="store_true", + help=( + "Fine tune for forecasting. It overwrites some of the Config settings. " + "Overwrites specified with --config take precedence." + ), + ) + + return parser + + +def get_inference_parser() -> argparse.ArgumentParser: + parser = argparse.ArgumentParser(allow_abbrev=False) + + _add_model_loading_params(parser) + _add_general_arguments(parser) + + parser.add_argument( + "--start_date", + "-start", + type=_format_date, + default="2022-10-01", + help="Start date for inference. Format must be parsable with pd.to_datetime.", + ) + parser.add_argument( + "--end_date", + "-end", + type=_format_date, + default="2022-12-01", + help="End date for inference. Format must be parsable with pd.to_datetime.", + ) + parser.add_argument( + "--samples", type=int, default=10000000, help="Number of inference samples." + ) + parser.add_argument( # behaviour changed => implies default=False + "--save_samples", + type=bool, + default=True, + help="Toggle saving of samples from inference. Default True", + ) + parser.add_argument( + "--streams_output", + nargs="+", + help="Output streams during inference.", + ) + + return parser + + +def _format_date(date: str) -> str: + try: + parsed = pd.to_datetime(date, errors="raise") + except (pd.errors.ParserError, ValueError) as e: + msg = f"Can not parse a valid date from input: {date}, with type {type(date)}." + raise ValueError(msg) from e + + return parsed.strftime("%Y%m%d%H%M") + + +def _add_general_arguments(parser: argparse.ArgumentParser): + parser.add_argument( + "--private_config", + type=Path, + default=None, + help=( + "Path to the private configuration file that includes platform specific information " + " like paths." + ), + ) + parser.add_argument( + "--config", + type=Path, + nargs="+", + default=[], + help="Optional experiment specfic configuration files in ascending order of precedence.", + ) + parser.add_argument( + "--run_id", + type=str, + help=( + "The run id for this run." + " All artifacts (models, metrics, ...) will be stored under this run_id." 
+ " If not provided, a new run_id will be created" + ), + ) + parser.add_argument( + "--options", + nargs="+", + default=[], + help=( + "Overwrite individual config options." + " This takes precedence over overwrites passed via --config or --finetune_forecast." + " Individual items should be of the form: parent_obj.nested_obj=value" + ), + ) + + +def _add_model_loading_params(parser: argparse.ArgumentParser): + parser.add_argument( + "-id", + "--from_run_id", + required=True, + help=( + "Start inference or continue training from the WeatherGenerator" + " model with the given run id." + ), + ) + parser.add_argument( + "-e", + "--mini_epoch", + type=int, + default=-1, + help=( + "Mini_epoch of pretrained WeatherGenerator model used" + " (Default -1 corresponds to the last checkpoint)." + ), + ) + parser.add_argument( + "--reuse_run_id", + action="store_true", + help="Use the id given via --from_run_id also for the current run. " + "The storage location for artifacts will be reused as well. " + "This might overwrite artifacts from previous runs.", + ) diff --git a/src/weathergen/utils/compare_run_configs.py b/src/weathergen/utils/compare_run_configs.py old mode 100644 new mode 100755 index 62968e63e..28eade1cf --- a/src/weathergen/utils/compare_run_configs.py +++ b/src/weathergen/utils/compare_run_configs.py @@ -1,4 +1,17 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +#!/usr/bin/env -S uv run +# /// script +# dependencies = [ +# "pandas", +# "tabulate", +# "pyyaml", +# "omegaconf", +# "weathergen", +# ] +# [tool.uv.sources] +# weathergen = { path = "../../../" } +# /// + +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -7,31 +20,242 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. -import code + import argparse -import dictdiffer +import fnmatch +import logging +import os +from pathlib import Path + +import pandas as pd +import yaml +from omegaconf import OmegaConf + +from weathergen.common.config import load_model_config + + +def truncate_value(value, max_length=50): + """ + Truncate long string values to reduce table width. + """ + if isinstance(value, str) and len(value) > max_length: + return value[: max_length - 3] + "..." + return value + + +def flatten_dict(d, parent_key="", sep="."): + """ + Recursively flattens a nested dictionary, joining keys with sep. + Returns a flat dictionary with compound keys. + """ + items = [] + for k, v in d.items(): + new_key = f"{parent_key}{sep}{k}" if parent_key else k + if isinstance(v, dict): + items.extend(flatten_dict(v, new_key, sep=sep).items()) + else: + items.append((new_key, v)) + return dict(items) + + +def key_matches_patterns(key: str, patterns: list) -> bool: + """ + Check if a key matches any of the wildcard patterns. 
+ """ + if not patterns: + return False + return any(fnmatch.fnmatch(key, pattern) for pattern in patterns) + + +def build_config_dataframe( + configs: dict, max_value_length: int = 50, always_show_patterns: list = None +) -> pd.DataFrame: + """Build DataFrame with configs, filtering identical rows unless in always_show_patterns.""" + always_show_patterns = always_show_patterns or [] + + all_keys = sorted({k for conf in configs.values() for k in conf}) + run_ids = list(configs.keys()) + data = {k: [configs[run_id].get(k, "") for run_id in run_ids] for k in all_keys} + df = pd.DataFrame(data, index=run_ids).T + + # Truncate and filter + df = df.map(lambda x: truncate_value(x, max_value_length)) + varying_rows = df.astype(str).apply(lambda row: len(set(row)) > 1, axis=1) + always_show_rows = df.index.to_series().apply( + lambda key: key_matches_patterns(key, always_show_patterns) + ) + return df[varying_rows | always_show_rows] + + +def highlight_row(row: pd.Series) -> pd.Series: + """Bold all values in a row if there are differences.""" + if len(set(row.astype(str))) <= 1: + return row + return pd.Series([f"**{v}**" if v != "" else v for v in row], index=row.index) + + +def row_has_bold(row: pd.Series) -> bool: + """Return True if any value in the row is bolded.""" + return any(isinstance(v, str) and v.startswith("**") for v in row) + + +def configs_to_markdown_table( + configs: dict, max_value_length: int = 50, always_show_patterns: list = None +) -> str: + """Generate a markdown table comparing all config parameters across runs.""" + df = build_config_dataframe(configs, max_value_length, always_show_patterns) + df_highlighted = df.apply(highlight_row, axis=1) + # Move rows with bold values to the top + bold_mask = df_highlighted.apply(row_has_bold, axis=1) + df_sorted = pd.concat([df_highlighted[bold_mask], df_highlighted[~bold_mask]]) + return df_sorted.to_markdown(tablefmt="github") + + +def process_streams(cfg: dict | None): + """Process and flatten streams configuration.""" + if "streams" not in cfg: + return + + streams_val = cfg["streams"] + + # Convert OmegaConf objects to regular Python objects + if hasattr(streams_val, "_content"): + streams_val = OmegaConf.to_object(streams_val) + + # Unpack streams based on type + if isinstance(streams_val, list): + for i, stream in enumerate(streams_val): + if isinstance(stream, dict): + for k, v in stream.items(): + cfg[f"streams[{i}].{k}"] = v + else: + cfg[f"streams[{i}]"] = stream + elif isinstance(streams_val, dict): + for k, v in streams_val.items(): + cfg[f"streams.{k}"] = v + else: + cfg["streams.value"] = streams_val + + del cfg["streams"] + + +def main(): + logging.basicConfig(level=logging.INFO) + logger = logging.getLogger(__name__) + + parser = argparse.ArgumentParser( + description="Compare WeatherGenerator configs and output markdown table." 
+ ) + parser.add_argument("-r1", "--run_id_1", required=False) + parser.add_argument("-r2", "--run_id_2", required=False) + parser.add_argument( + "-m1", + "--model_directory_1", + type=Path, + default=Path("models/"), + help="Path to model directory for -r1/--run_id_1", + ) + parser.add_argument( + "-m2", + "--model_directory_2", + type=Path, + default=Path("models/"), + help="Path to model directory for -r2/--run_id_2", + ) + parser.add_argument( + "--config", + default="config/compare_config_list.yml", + help="Path to YAML file listing run_ids and always_show_patterns.", + ) + parser.add_argument( + "output", nargs="?", default="reports/compare_configs.md", help="Output markdown file path." + ) + parser.add_argument( + "--max-length", type=int, default=30, help="Maximum length for config values." + ) + parser.add_argument( + "--show", + type=str, + default=[], + help=( + "Put '*' to show all parameters, or leave empty to only show changed parameters. " + "Use for example 'ae_global' to show all parameters starting with 'ae_global'." + ), + ) + + args = parser.parse_args() + + if args.run_id_1 and args.run_id_2: + config_files = [ + [args.run_id_1, args.model_directory_1], + [args.run_id_2, args.model_directory_2], + ] + yaml_always_show_patterns = args.show if args.show else [] + # Read YAML config list if exists + elif Path(args.config).exists(): + with open(args.config) as f: + yaml_data = yaml.safe_load(f) + + config_files = yaml_data["run_ids"] + yaml_always_show_patterns = yaml_data.get("always_show_patterns", []) + else: + # error: pass config or command line arguments + logger.error( + "Please provide a config list (.yml format) or specify two run IDs " + "and their model directories." + ) + return + # Load configs using load_model_config from config module + configs = {} + for item in config_files: + # Handle both formats: [run_id, path] or just path + if isinstance(item, list) and len(item) == 2: + run_id, path = item + else: + path = item + run_id = os.path.splitext(os.path.basename(path))[0] -from obslearn.utils.config import Config + logger.info(f"Loading config for run_id: {run_id} from {path}") + try: + cfg = load_model_config(run_id=run_id, mini_epoch=None, model_path=path) + except Exception: + logger.warning( + f"Failed to load config for run_id: {run_id} from {path}", + "Assuming mini_epoch=0 and retrying.", + ) + cfg = load_model_config(run_id=run_id, mini_epoch=0, model_path=path) + actual_run_id = cfg.get("run_id", run_id) -if __name__ == '__main__' : + # Process streams and flatten + process_streams(cfg) + flat_cfg = flatten_dict(cfg) + configs[actual_run_id] = flat_cfg - parser = argparse.ArgumentParser( ) - parser.add_argument( '-r1', '--run_id_1', required=True) - parser.add_argument( '-r2', '--run_id_2', required=True) - args = parser.parse_args() + # Generate markdown table + md_table = configs_to_markdown_table(configs, args.max_length, yaml_always_show_patterns) - cf1 = Config.load( args.run_id_1) - cf2 = Config.load( args.run_id_2) - # print(cf1.__dict__) - result = dictdiffer.diff( cf1.__dict__, cf2.__dict__) - for item in list(result) : + # Prepare output file name with run ids + run_ids = [str(rid) for rid in configs.keys()] + run_ids_str = "_".join(run_ids) + output_path = args.output + # Ensure 'reports' folder exists + reports_dir = os.path.dirname(output_path) or "reports" + if not os.path.exists(reports_dir): + os.makedirs(reports_dir, exist_ok=True) + # Insert run ids into filename before extension + base, ext = 
os.path.splitext(os.path.basename(output_path)) + output_file = os.path.join(reports_dir, f"{base}_{run_ids_str}{ext}") - # TODO: if streams_directory differs than we need to manually compare streams using name - # since index-based comparison by dictdiffer is meaningless + # Write output + with open(output_file, "w") as f: + f.write(md_table) + logger.info(f"Table written to {output_file}") + row_count = len(md_table.split("\n")) - 3 + pattern_info = ( + f" (patterns: {', '.join(yaml_always_show_patterns)})" if yaml_always_show_patterns else "" + ) + logger.info(f"Filtered to {row_count} rows{pattern_info}") - # # for streams, translate index in list of streams to stream name - # if item[1][0] == 'streams' : - # name = cf1.streams[item[1][1]]['name'] - # item[1][1] = name - print( f'{item[1]} :: {item[2]}') +if __name__ == "__main__": + main() diff --git a/src/weathergen/utils/config.py b/src/weathergen/utils/config.py deleted file mode 100644 index e382dddad..000000000 --- a/src/weathergen/utils/config.py +++ /dev/null @@ -1,69 +0,0 @@ -# (C) Copyright 2024 WeatherGenerator contributors. -# -# This software is licensed under the terms of the Apache Licence Version 2.0 -# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. -# -# In applying this licence, ECMWF does not waive the privileges and immunities -# granted to it by virtue of its status as an intergovernmental organisation -# nor does it submit to any jurisdiction. - -from pathlib import Path -import json -import os - -########################################### -class Config : - - def __init__( self) : - pass - - def print( self) : - self_dict = self.__dict__ - for key, value in self_dict.items() : - if key != 'streams' : - print("{} : {}".format( key, value)) - else : - for rt in value : - for k,v in rt.items() : - print( '{}{} : {}'.format( '' if k=='reportypes' else ' ', k, v)) - - def save( self, epoch=None) : - - # save in directory with model files - dirname = './models/{}'.format( self.run_id) - # if not os.path.exists(dirname): - os.makedirs( dirname, exist_ok=True) - dirname = './models/{}'.format( self.run_id) - # if not os.path.exists(dirname): - os.makedirs( dirname, exist_ok=True) - - fname = './models/{}/model_{}'.format( self.run_id, self.run_id) - epoch_str = '' - if epoch is not None : - epoch_str = '_latest' if epoch==-1 else '_epoch{:05d}'.format(epoch) - fname += '{}.json'.format( epoch_str) - - json_str = json.dumps(self.__dict__ ) - with open(fname, 'w') as f : - f.write( json_str) - - @staticmethod - def load( run_id, epoch=None) : - - if '/' in run_id : # assumed to be full path instead of just id - fname = run_id - else : - fname = './models/{}/model_{}'.format( run_id, run_id) - epoch_str = '' - if epoch is not None : - epoch_str = '_latest' if epoch==-1 else '_epoch{:05d}'.format(epoch) - fname += '{}.json'.format( epoch_str) - - with open(fname, 'r') as f : - json_str = f.readlines() - - cf = Config() - cf.__dict__ = json.loads( json_str[0]) - - return cf - diff --git a/src/weathergen/utils/distributed.py b/src/weathergen/utils/distributed.py new file mode 100644 index 000000000..af467a3a9 --- /dev/null +++ b/src/weathergen/utils/distributed.py @@ -0,0 +1,114 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. 
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+
+import torch
+import torch.distributed as dist
+
+SYNC_TIMEOUT_SEC = 60 * 60  # 1 hour
+
+
+def is_root(pg: dist.ProcessGroup | None = None) -> bool:
+    """
+    Check if the current rank is the root rank (rank 0).
+
+    Args:
+        pg (ProcessGroup, optional): The process group to work on.
+            If None (default), the default process group will be used.
+    """
+    if not _is_distributed_initialized():
+        # If not initialized, it is assumed to be running in single-process mode.
+        # TODO: check what should happen if a process group is passed
+        return True
+    return dist.get_rank(pg) == 0
+
+
+def _is_distributed_initialized():
+    return dist.is_available() and dist.is_initialized()
+
+
+def get_world_size() -> int:
+    """
+    Get the world size of the default process group
+
+    Returns:
+        int: world size
+    """
+    if not _is_distributed_initialized():
+        return 1
+
+    return dist.get_world_size()
+
+
+def get_rank() -> int:
+    """
+    Get the rank of the current process
+
+    Returns:
+        int: current rank
+    """
+    if not _is_distributed_initialized():
+        return 0
+
+    return dist.get_rank()
+
+
+def ddp_average(data: torch.Tensor) -> torch.Tensor:
+    """
+    Average a tensor across DDP ranks
+
+    Args:
+        data: tensor to be averaged (arbitrary shape)
+
+    Returns:
+        tensor with same shape as data, but entries averaged across all DDP ranks
+    """
+    if _is_distributed_initialized():
+        dist.all_reduce(data, op=dist.ReduceOp.AVG)
+    return data.cpu()
+
+
+def all_gather_vlen(tensor: torch.Tensor, group=None) -> list[torch.Tensor]:
+    """Gather tensors with the same number of dimensions but different lengths."""
+
+    if not _is_distributed_initialized():
+        return [tensor]
+
+    world_size = dist.get_world_size(group=group)
+
+    # Gather lengths first
+    shape = torch.as_tensor(tensor.shape, device=tensor.device)
+    shapes = [torch.empty_like(shape) for _ in range(world_size)]
+    dist.all_gather(shapes, shape, group=group)
+
+    # Gather data
+    inputs = [tensor] * world_size
+    outputs = [torch.empty(*_shape, dtype=tensor.dtype, device=tensor.device) for _shape in shapes]
+    dist.all_to_all(outputs, inputs, group=group)
+
+    return outputs
+
+
+def all_gather_vdim(tensor: torch.Tensor, group=None) -> list[torch.Tensor]:
+    """Gather tensors with different number of dimensions."""
+
+    if not _is_distributed_initialized():
+        return [tensor]
+
+    world_size = dist.get_world_size(group=group)
+
+    # Gather shapes first
+    shapes = all_gather_vlen(torch.as_tensor(tensor.shape, device=tensor.device), group=group)
+
+    # Gather data
+    inputs = [tensor] * world_size
+    outputs = [torch.empty(*_shape, dtype=tensor.dtype, device=tensor.device) for _shape in shapes]
+    dist.all_to_all(outputs, inputs, group=group)
+
+    return outputs
diff --git a/src/weathergen/utils/logger.py b/src/weathergen/utils/logger.py
index 6c146f654..9bb39e3a4 100644
--- a/src/weathergen/utils/logger.py
+++ b/src/weathergen/utils/logger.py
@@ -1,4 +1,4 @@
-# (C) Copyright 2024 WeatherGenerator contributors.
+# (C) Copyright 2025 WeatherGenerator contributors.
 #
 # This software is licensed under the terms of the Apache Licence Version 2.0
 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
@@ -7,24 +7,143 @@
 # granted to it by virtue of its status as an intergovernmental organisation
 # nor does it submit to any jurisdiction.
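A hypothetical usage sketch for the `all_gather_vlen` helper above, assuming an initialized default process group and a 1-D tensor whose length differs per rank (for example, per-rank loss values); every rank receives the same concatenated result:

    import torch
    from weathergen.utils.distributed import all_gather_vlen

    def gather_all_losses(local_losses: torch.Tensor) -> torch.Tensor:
        # shapes are exchanged first, then the data via all_to_all,
        # so the per-rank tensors may have different lengths
        parts = all_gather_vlen(local_losses)
        return torch.cat(parts)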
+import json import logging -import pathlib +import logging.config import os +import pathlib +from functools import cache + +from weathergen.common.config import _load_private_conf + +LOGGING_CONFIG = """ +{ + "version": 1, + "disable_existing_loggers": false, + "formatters": { + "custom": { + "class": "weathergen.utils.logger.ColoredRelPathFormatter", + "format": \ + "%(asctime)s %(process)d %(filename)s:%(lineno)d : %(levelname)-8s : %(message)s" + } + }, + "handlers": { + "stdout": { + "class": "logging.StreamHandler", + "level": "INFO", + "formatter": "custom", + "stream": "ext://sys.stdout" + }, + "stderr": { + "class": "logging.StreamHandler", + "level": "ERROR", + "formatter": "custom", + "stream": "ext://sys.stderr" + }, + "logfile": { + "class": "logging.FileHandler", + "level": "DEBUG", + "formatter": "custom", + "filename": "log.text", + "mode": "w" + }, + "errorfile": { + "class": "logging.FileHandler", + "level": "ERROR", + "formatter": "custom", + "filename": "error.txt", + "mode": "w" + } + }, + "root": { + "level": "DEBUG", + "handlers": [ + "stderr", + "stdout", + "logfile", + "errorfile" + ] + } +} +""" + + +class ColoredRelPathFormatter(logging.Formatter): + COLOR_CODES = { + logging.CRITICAL: "\033[1;35m", # bright/bold magenta + logging.ERROR: "\033[1;31m", # bright/bold red + logging.WARNING: "\033[1;33m", # bright/bold yellow + logging.INFO: "\033[0;37m", # white / light gray + logging.DEBUG: "\033[1;30m", # bright/bold dark gray + } + + RESET_CODE = "\033[0m" + + def __init__(self, color, *args, **kwargs): + super(ColoredRelPathFormatter, self).__init__(*args, **kwargs) + self.color = color + self.root_path = pathlib.Path(__file__).parent.parent.parent.resolve() + + def format(self, record, *args, **kwargs): + if self.color and record.levelno in self.COLOR_CODES: + record.color_on = self.COLOR_CODES[record.levelno] + record.color_off = self.RESET_CODE + else: + record.color_on = "" + record.color_off = "" + record.pathname = os.path.relpath(record.pathname, self.root_path) + return super(ColoredRelPathFormatter, self).format(record, *args, **kwargs) + + +@cache +def init_loggers(run_id, logging_config=None): + """ + Initialize the logger for the package and set output streams/files. + + WARNING: this function resets all the logging handlers. + + This function follows a singleton pattern, it will only operate once per process + and will be a no-op if called again. + + Valid arguments for streams: tuple of + sys.stdout, sys.stderr : standard out and err streams + null : /dev/null + string/pathlib.Path : specifies path and outfile to be used for stream + + Limitation: Using the same stream in a non-contiguous manner across logging levels, e.g. 
+        the same file for CRITICAL and WARNING but a different one for ERROR, is currently
+        not supported.
+    """
+
+    # Get current time
+    # Shelved until decided how to change logging directory structure
+    # now = datetime.now()
+    # timestamp = now.strftime("%Y-%m-%d-%H%M")
+
+    # output_dir = f"./output/{timestamp}-{run_id}"
+    output_dir = f"./output/{run_id}"
+
+    # load the structure for logging config
+    if logging_config is None:
+        logging_config = json.loads(LOGGING_CONFIG)
+
+    for _, handler in logging_config["handlers"].items():
+        for k, v in handler.items():
+            if k == "formatter":
+                handler[k] = v
+            elif k == "filename":
+                filename = f"{output_dir}/{run_id}-{v}"
+                ofile = pathlib.Path(filename)
+                # make sure the path is independent of the directory the job is launched from
+                if not ofile.is_absolute():
+                    work_dir = pathlib.Path(_load_private_conf().get("path_shared_working_dir"))
+                    ofile = work_dir / ofile
+                pathlib.Path(ofile.parent).mkdir(parents=True, exist_ok=True)
+                handler[k] = ofile
+            else:
+                continue
+
+    # apply the assembled logging configuration
+    logging.config.dictConfig(logging_config)
-class RelPathFormatter(logging.Formatter):
-    def __init__(self, fmt, datefmt=None):
-        super().__init__(fmt, datefmt)
-        self.root_path = pathlib.Path(__file__).parent.parent.parent.resolve()
-
-    def format(self, record):
-        # Replace the full pathname with the relative path
-        record.pathname = os.path.relpath(record.pathname, self.root_path)
-        return super().format(record)
-
-logger = logging.getLogger('obslearn')
-logger.setLevel(logging.DEBUG)
-ch = logging.StreamHandler()
-formatter = RelPathFormatter('%(pathname)s:%(lineno)d : %(levelname)-8s : %(message)s')
-ch.setFormatter(formatter)
-logger.handlers.clear()
-logger.addHandler(ch)
+    logging.info(f"Logging set up. Logs are in {output_dir}")
diff --git a/src/weathergen/utils/metrics.py b/src/weathergen/utils/metrics.py
new file mode 100644
index 000000000..aedb48739
--- /dev/null
+++ b/src/weathergen/utils/metrics.py
@@ -0,0 +1,64 @@
+"""
+Utilities related to reading and writing metrics.
+
+We use our own simple json-based format to abstract away various backends
+(our own pipeline, mlflow, wandb, etc.).
+"""
+
+from pathlib import Path
+
+import polars as pl
+
+# Known columns that are not scalar metrics:
+_known_cols = {"weathergen.timestamp": pl.Int64, "weathergen.time": pl.Int64, "stage": pl.String}
+
+
+def read_metrics_file(f: str | Path) -> pl.DataFrame:
+    """
+    Loads a file of metrics.
+
+    The resulting dataframe has the following format:
+    - all columns in known_cols (if they exist in the file) have the right type
+    - all other columns are of type float64 (including NaN values)
+    """
+
+    # All values are scalar, except for known values
+    # The following points need to be taken into account:
+    # 1. The schema is not known in advance
+    # 2. NaN is encoded as a string
+    # 3. numbers are encoded as numbers
+    # The file therefore needs to be read three times, and the results merged:
+    # 1. Get the names of all the columns
+    # 2. Find all the NaN values
+    # 3. Read the numbers
+    # 4.
Merge the two dataframes + + # Find the list of all columns (read everything) + df0 = pl.read_ndjson(f, infer_schema_length=None) + # Read with the final schema: + schema1 = dict([(n, _known_cols.get(n, pl.Float64)) for n in df0.columns]) + df1 = pl.read_ndjson(f, schema=schema1) + # Read again as strings to find the NaN values: + schema2 = dict([(n, _known_cols.get(n, pl.String)) for n in df0.columns]) + metrics_cols = [n for n in df0.columns if n not in _known_cols] + df2 = pl.read_ndjson(f, schema=schema2).cast(dict([(n, pl.Float64) for n in metrics_cols])) + + # Merge the two dataframes: + for n in metrics_cols: + df1 = df1.with_columns( + pl.when(pl.col(n).is_not_nan()).then(df1[n]).otherwise(df2[n]).alias(n) + ) + return df1 + + +def get_train_metrics_path(base_path: Path, run_id: str) -> Path: + """ + Return the path to the training metrics.json for a particular run_id. This is required for + backwards compatibility after changing the name of the `results/{RUN-ID}/metrics.json` file to + `results/{RUN-ID}/{RUN-ID}_train_metrics.json` to disambiguate `metrics.json`. + See https://github.com/ecmwf/WeatherGenerator/issues/590 for details. + """ + if (base_path / run_id / "metrics.json").exists(): + return base_path / run_id / "metrics.json" + else: + return base_path / run_id / f"{run_id}_train_metrics.json" diff --git a/src/weathergen/utils/metrics_test.py b/src/weathergen/utils/metrics_test.py new file mode 100644 index 000000000..bb8043889 --- /dev/null +++ b/src/weathergen/utils/metrics_test.py @@ -0,0 +1,20 @@ +from io import StringIO +from math import isnan + +from weathergen.utils.metrics import ( + read_metrics_file, +) + +s = """{"weathergen.timestamp":100, "m": "nan"} +{"weathergen.timestamp":101,"m": 1.3} +{"weathergen.timestamp":102,"a": 4} +""" + + +def test1(): + df = read_metrics_file(StringIO(s)) + assert df.shape == (3, 3) + assert df["weathergen.timestamp"].to_list() == [100, 101, 102] + assert isnan(df["m"].to_list()[0]) + assert df["m"].to_list()[1:] == [1.3, None] + assert df["a"].to_list() == [None, None, 4] diff --git a/src/weathergen/utils/plot_training.py b/src/weathergen/utils/plot_training.py index cfd15327b..573c084c4 100644 --- a/src/weathergen/utils/plot_training.py +++ b/src/weathergen/utils/plot_training.py @@ -1,4 +1,4 @@ -# (C) Copyright 2024 WeatherGenerator contributors. +# (C) Copyright 2025 WeatherGenerator contributors. # # This software is licensed under the terms of the Apache Licence Version 2.0 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. @@ -7,326 +7,743 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. 
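For reference, the NDJSON layout that `read_metrics_file` above parses (and that `metrics_test.py` exercises) is one JSON object per line, with NaN encoded as the string "nan" because bare NaN is not valid JSON. A sketch of a conforming writer; the helper name and the metric key `m` are illustrative only:

    import json
    import math

    def append_metric(path: str, timestamp: int, **metrics: float) -> None:
        record = {"weathergen.timestamp": timestamp}
        for name, value in metrics.items():
            # encode NaN as a string so the line stays valid JSON
            record[name] = "nan" if math.isnan(value) else value
        with open(path, "a") as f:
            f.write(json.dumps(record) + "\n")

    append_metric("metrics.json", 100, m=float("nan"))
    append_metric("metrics.json", 101, m=1.3)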
-import numpy as np -import code - -import os -import subprocess -import glob import argparse - -import pandas as pd +import logging +import subprocess +import sys +from pathlib import Path import matplotlib.pyplot as plt +import numpy as np +import yaml -from weathergen.utils.config import Config -from weathergen.utils.train_logger import TrainLogger +import weathergen.common.config as config +from weathergen.utils.train_logger import Metrics, TrainLogger -out_folder = './plots/' +_logger = logging.getLogger(__name__) -#################################################################################################### -def clean_out_folder() : - files = glob.glob( out_folder + '*.png') - for f in files : - os.remove(f) +DEFAULT_RUN_FILE = Path("./config/runs_plot_train.yml") -#################################################################################################### -def get_stream_names( run_id) : - # return col names from training (should be identical to validation) - cf = Config.load( run_id, -1) - return [si['name'].replace(',','').replace('/','_').replace(' ','_') for si in cf.streams] #################################################################################################### -def plot_lr( runs_ids, runs_data, runs_active, x_axis='samples') : +def _ensure_list(value): + """ + Ensure that the input value is a list. If it is not a list, convert it to a list. + Parameters + ---------- + value : any + Input value to check. + Returns + ------- + list + A list containing the input value if it was not a list, + or the input value itself if it was already a list. + """ + return value if isinstance(value, list) else [value] - prop_cycle = plt.rcParams['axes.prop_cycle'] - colors = prop_cycle.by_key()['color'] + ['r', 'g', 'b', 'k', 'y', 'm'] - fig = plt.figure( figsize=(10,7), dpi=300) - # train - idx = 0 - linestyle = '-' +#################################################################################################### +def _check_run_id_dict(run_id_dict: dict) -> bool: + """ + Check if the run_id_dict is valid. + + Parameters + ---------- + run_id_dict : dict + Dictionary to check. + Returns + ------- + """ + if not isinstance(run_id_dict, dict): + return False + + for k, v in run_id_dict.items(): + if not isinstance(k, str) or not isinstance(v, list) or len(v) != 2: + raise argparse.ArgumentTypeError( + ( + "Each key must be a string and", + f" each value must be a list of [job_id, experiment_name], but got: {k}: {v}", + ) + ) - legend_str = [] - for j,(run_id, run_data) in enumerate(zip( runs_ids, runs_data)) : - if run_data[idx][1].shape[0] == 0 : - continue - x_idx = [i for i,c in enumerate(run_data[idx][0]) if x_axis in c][0] - data_idxs = [i for i,c in enumerate(run_data[idx][0]) if 'lr'==c][0] +#################################################################################################### +def _read_str_config(yaml_str: str) -> dict: + """ + Read a dictionary-like string to get a configuration dictionary. - plt.plot( run_data[idx][1][:,x_idx], run_data[idx][1][:,data_idxs], linestyle, - color=colors[j % len(colors)]) - legend_str += [ ('R' if runs_active[j] else 'X') + ' : ' + run_id - + ' : ' + runs_ids[run_id][1] ] + Parameters + ---------- + yaml_str : str + Dictionary-like string to read. + Returns + ------- + dict + The content of the string as a dictionary. 
+ """ + config_dict = yaml.safe_load(yaml_str) - if len(legend_str) < 1 : - return + # Validate the structure: {run_id: [job_id, experiment_name]} + _check_run_id_dict(config_dict) + + return config_dict - plt.legend( legend_str) - plt.grid( True, which="both", ls="-") - plt.yscale( 'log') - plt.title( 'learning rate') - plt.ylabel( 'lr'); plt.xlabel( x_axis); plt.tight_layout() - rstr = ''.join([f'{r}_' for r in runs_ids]) - plt.savefig( './plots/{}lr.png'.format( rstr)) - plt.close() #################################################################################################### -def plot_utilization( runs_ids, runs_data, runs_active, x_axis='samples') : - - prop_cycle = plt.rcParams['axes.prop_cycle'] - colors = prop_cycle.by_key()['color'] + ['r', 'g', 'b', 'k', 'y', 'm'] - fig = plt.figure( figsize=(10,7), dpi=300) - - linestyles = ['-', '--', '.-'] - - # performance - idx = 2 - - legend_str = [] - for j,(run_id, run_data) in enumerate(zip( runs_ids, runs_data)) : - if run_data[idx][1].shape[0] == 0 : - continue - - x_idx = [i for i,c in enumerate(run_data[0][0]) if x_axis in c][0] - data_idxs = [i for i in range(len(run_data[2][0]))] - - for ii,di in enumerate(data_idxs) : - plt.plot( run_data[0][1][:,x_idx], run_data[idx][1][:,di], linestyles[ii], - color=colors[j % len(colors)]) - legend_str += [ ('R' if runs_active[j] else 'X') + ' : ' + run_id + ', ' + run_data[idx][0][ii] - + ' : ' + runs_ids[run_id][1] ] - - if len(legend_str) < 1 : - return - - plt.legend( legend_str) - plt.grid( True, which="both", ls="-") - # plt.yscale( 'log') - plt.title( 'utilization') - plt.ylabel( 'percentage utilization'); plt.xlabel( x_axis); plt.tight_layout() - rstr = ''.join([f'{r}_' for r in runs_ids]) - plt.savefig( './plots/{}utilization.png'.format( rstr)) - plt.close() +def _read_yaml_config(yaml_file_path): + """ + Read a YAML file to get a configuration dictionary for plotting training diagnostics. + Expected structure in the YAML file: + train: + plot: + run_id: + slurm_id : SLURM_JOB (specify 0 if not available) + description: job description + run_id: + slurm_id : SLURM_JOB (specify 0 if not available) + description : job description + ... + + Parameters + ---------- + yaml_file_path : str or Path + Path to the YAML file containing the configuration. + Returns + ------- + dict + A dictionary with run IDs as keys and a list of [job ID, experiment name] as values. + """ + with open(yaml_file_path) as f: + data = yaml.safe_load(f) + + # Extract configuration for plotting training diagnostics + config_dict_temp = data.get("train", {}).get("plot", {}) + + # sanity checks + assert len(config_dict_temp) > 0, "At least one run must be specified." + + # convert to legacy format + config_dict = {} + for k, v in config_dict_temp.items(): + assert isinstance(v["slurm_id"], int), "slurm_id has to be int." + assert isinstance(v["description"], str), "description has to be str." 
+ config_dict[k] = [v["slurm_id"], v["description"]] + + # Validate the structure: {run_id: [job_id, experiment_name]} + _check_run_id_dict(config_dict) + + return config_dict + #################################################################################################### -def plot_loss_per_stream( modes, runs_ids, runs_data, runs_times, runs_active, stream_names, - errs = ['mse'], x_axis='samples', x_type='step', x_scale_log=False) : - ''' - Plot each stream in stream_names (using matching to data columns) for all run_ids - ''' +def clean_plot_folder(plot_dir: Path): + """ + Clean the plot folder by removing all png-files in it. - modes = [modes] if type(modes) is not list else modes - # repeat colors when train and val is plotted simultaneously - prop_cycle = plt.rcParams['axes.prop_cycle'] - colors = prop_cycle.by_key()['color'] + ['r', 'g', 'b', 'k', 'm', 'y'] - - for stream_name in stream_names : + Parameters + ---------- + plot_dir : Path + Path to the plot directory + """ + for image in plot_dir.glob("*.png"): + image.unlink() - fig = plt.figure( figsize=(10,7), dpi=300) - legend_strs = [] - min_val = np.finfo( np.float32).max - max_val = 0. - for mode in modes : - legend_strs += [ [] ] - for err in errs : - - idx = 0 if mode=='train' else 1 - linestyle = '-' if mode=='train' else ('--x' if len(modes)>1 else '-x') - linestyle = ':' if 'stddev' in err else linestyle - alpha = 1.0 - if 'train' in modes and 'val' in modes : - alpha = 0.35 if 'train' in mode else alpha - - for j,(run_id, run_data) in enumerate(zip( runs_ids, runs_data)) : - - x_idx = [i for i,c in enumerate(run_data[idx][0]) if x_axis in c][0] - data_idxs = [i for i,c in enumerate(run_data[idx][0]) if err in c] - - for i,col in enumerate( np.array(run_data[idx][0])[data_idxs]) : - if stream_name in col : - if run_data[idx][1].shape[0] == 0 : - continue - - x_vals = run_data[idx][1][:,x_idx] if x_type=='step' else runs_times[j][idx][1] - - plt.plot( x_vals, run_data[idx][1][:,data_idxs[i]], linestyle, - color=colors[j % len(colors)], alpha=alpha) - legend_strs[-1] += [ ('R' if runs_active[j] else 'X') + ' : ' + run_id - + ' : ' + runs_ids[run_id][1] + ': ' + col ] - - min_val = np.min( [ min_val, np.nanmin(run_data[idx][1][:,data_idxs[i]]) ]) - max_val = np.max( [ max_val, np.nanmax(run_data[idx][1][:,data_idxs[i]]) ]) - - # TODO: ensure that legend is plotted with full opacity - legend_str = legend_strs[0] - if len(legend_str) < 1 : - plt.close() - continue - - legend = plt.legend( legend_str, loc='upper right' if not x_scale_log else 'lower left') - for line in legend.get_lines(): - line.set( alpha=1.0) - plt.grid( True, which="both", ls="-") - plt.yscale( 'log') - # cap at 1.0 in case of divergence of run (through normalziation, max should be around 1.0) - plt.ylim( [ 0.95*min_val, (None if max_val<2.0 else min( 1.1, 1.025*max_val)) ]) - if x_scale_log : - plt.xscale( 'log') - plt.title( stream_name) - plt.ylabel( 'loss') - plt.xlabel( x_axis if x_type=='step' else 'rel. time [h]') +#################################################################################################### +def get_stream_names(run_id: str, model_path: Path | None = "./model"): + """ + Get the stream names from the model configuration file. 
+
+    Parameters
+    ----------
+    run_id : str
+        ID of the training run
+    model_path : Path
+        Path to the model directory
+    Returns
+    -------
+    list
+        List of stream names
+    """
+    # return col names from training (should be identical to validation)
+    cf = config.load_model_config(run_id, -1, model_path=model_path)
+    return [si["name"].replace(",", "").replace("/", "_").replace(" ", "_") for si in cf.streams]
+
+
+####################################################################################################
+def plot_lr(
+    runs_ids: dict[str, list],
+    runs_data: list[Metrics],
+    runs_active: list[bool],
+    plot_dir: Path,
+    x_axis: str = "samples",
+):
+    """
+    Plot learning rate curves of training runs.
+
+    Parameters
+    ----------
+    runs_ids : dict
+        dictionary with run ids as keys and list of SLURM job ids and descriptions as values
+    runs_data : list
+        list of Metrics objects containing the training data
+    runs_active : list
+        list of booleans indicating whether the run is still active
+    plot_dir : Path
+        directory to save the plots
+    x_axis : str
+        x-axis strings used in the column names (options: "samples", "dtime")
+    """
+    prop_cycle = plt.rcParams["axes.prop_cycle"]
+    colors = prop_cycle.by_key()["color"] + ["r", "g", "b", "k", "y", "m"]
+    _fig = plt.figure(figsize=(10, 7), dpi=300)
+
+    linestyle = "-"
+
+    legend_str = []
+    for j, run_data in enumerate(runs_data):
+        if run_data.train.is_empty():
+            continue
+        run_id = run_data.run_id
+        x_col = next(filter(lambda c: x_axis in c, run_data.train.columns))
+        data_cols = list(filter(lambda c: "learning_rate" in c, run_data.train.columns))
+
+        plt.plot(
+            run_data.train[x_col],
+            run_data.train[data_cols],
+            linestyle,
+            color=colors[j % len(colors)],
+        )
+        legend_str += [
+            ("R" if runs_active[j] else "X") + " : " + run_id + " : " + runs_ids[run_id][1]
+        ]
+
+    if len(legend_str) < 1:
+        _logger.warning(
+            "Could not find any data for plotting the learning rates of the runs: %s", runs_ids
+        )
+        return
+
+    plt.legend(legend_str)
+    plt.grid(True, which="both", ls="-")
+    plt.yscale("log")
+    plt.title("learning rate")
+    plt.ylabel("lr")
+    plt.xlabel(x_axis)
+    plt.tight_layout()
+    rstr = "".join([f"{r}_" for r in runs_ids])
+
+    # save the plot
+    plt_fname = plot_dir / f"{rstr}lr.png"
+    _logger.info(f"Saving learning rate plot to '{plt_fname}'")
+    plt.savefig(plt_fname)
+    plt.close()
+
+
+####################################################################################################
+def plot_utilization(
+    runs_ids: dict[str, list],
+    runs_data: list[Metrics],
+    runs_active: list[bool],
+    plot_dir: Path,
+    x_axis: str = "samples",
+):
+    """
+    Plot compute utilization of training runs.
+ + Parameters + ---------- + runs_ids : dict + dictionary with run ids as keys and list of SLURM job ids and descriptions as values + runs_data : list + list of Metrics objects containing the training data + runs_active : list + list of booleans indicating whether the run is still active + plot_dir : Path + directory to save the plots + x_axis : str + x-axis strings used in the column names (options: "samples", "dtime") + """ + prop_cycle = plt.rcParams["axes.prop_cycle"] + colors = prop_cycle.by_key()["color"] + ["r", "g", "b", "k", "y", "m"] + _fig = plt.figure(figsize=(10, 7), dpi=300) + + linestyles = ["-", "--", ".-"] + + legend_str = [] + for j, (run_id, run_data) in enumerate(zip(runs_ids, runs_data, strict=False)): + if run_data.train.is_empty(): + continue - x_axis : {samples,dtime} as used in the column names - ''' + x_col = next(filter(lambda c: x_axis in c, run_data.train.columns)) + data_cols = run_data.system.columns[1:] + + for ii, col in enumerate(data_cols): + plt.plot( + run_data.train[x_col], + run_data.system[col], + linestyles[ii], + color=colors[j % len(colors)], + ) + legend_str += [ + ("R" if runs_active[j] else "X") + + " : " + + run_id + + ", " + + col + + " : " + + runs_ids[run_id][1] + ] + + if len(legend_str) < 1: + _logger.warning("Could not find any data for utilization plot") + return + + plt.legend(legend_str) + plt.grid(True, which="both", ls="-") + # plt.yscale( 'log') + plt.title("utilization") + plt.ylabel("percentage utilization") + plt.xlabel(x_axis) + plt.tight_layout() + rstr = "".join([f"{r}_" for r in runs_ids]) - modes = [modes] if type(modes) is not list else modes - # repeat colors when train and val is plotted simultaneously - prop_cycle = plt.rcParams['axes.prop_cycle'] - colors = prop_cycle.by_key()['color'] + ['r', 'g', 'b', 'k', 'y', 'm'] + # save the plot + plt_fname = plot_dir / f"{rstr}utilization.png" + _logger.info(f"Saving utilization plot to '{plt_fname}'") + plt.savefig(plt_fname) + plt.close() - fig = plt.figure( figsize=(10,7), dpi=300) - legend_strs = [] - for mode in modes : - legend_strs += [ [] ] - for err in errs : +#################################################################################################### +def plot_loss_per_stream( + modes: list[str], + runs_ids: dict[str, list], + runs_data: list[Metrics], + runs_active: list[bool], + stream_names: list[str], + plot_dir: Path, + errs: list[str] | None = None, + x_axis: str = "samples", + x_type: str = "step", + x_scale_log: bool = False, +): + """ + Plot each stream in stream_names (using matching to data columns) for all run_ids - idx = 0 if mode=='train' else 1 - linestyle = '-' if mode=='train' else ('--x' if len(modes)>1 else '-x') - linestyle = ':' if 'stddev' in err else linestyle - alpha = 1.0 - if 'train' in modes and 'val' in modes : - alpha = 0.35 if 'train' in mode else alpha + Parameters + ---------- + modes : list + list of modes for which losses are plotted (e.g. train, val) + runs_ids : dict + dictionary with run ids as keys and list of SLURM job ids and descriptions as values + runs_data : list + list of Metrics objects containing the training data + runs_active : list + list of booleans indicating whether the run is still active + stream_names : list + list of stream names to plot + plot_dir : Path + directory to save the plots + errs : list + list of errors to plot (e.g. 
mse, stddev)
+    x_axis : str
+        x-axis strings used in the column names (options: "samples", "dtime")
+    x_type : str
+        x-axis type (options: "step", "reltime")
+    x_scale_log : bool
+        whether to use log scale for x-axis
+    """
+
+    if errs is None:
+        errs = ["loss_mse"]
+
+    modes = [modes] if type(modes) is not list else modes
+    # repeat colors when train and val is plotted simultaneously
+    prop_cycle = plt.rcParams["axes.prop_cycle"]
+    colors = prop_cycle.by_key()["color"] + ["r", "g", "b", "k", "m", "y"]
+
+    for stream_name in stream_names:
+        _fig = plt.figure(figsize=(10, 7), dpi=300)
+
+        legend_strs = []
+        min_val = np.finfo(np.float32).max
+        max_val = 0.0
+        for mode in modes:
+            legend_strs += [[]]
+            for err in errs:
+                linestyle = "-" if mode == "train" else ("--x" if len(modes) > 1 else "-x")
+                linestyle = ":" if "stddev" in err else linestyle
+                alpha = 1.0
+                if "train" in modes and "val" in modes:
+                    alpha = 0.35 if "train" in mode else alpha
+
+                for j, run_data in enumerate(runs_data):
+                    run_data_mode = run_data.by_mode(mode)
+                    if run_data_mode.is_empty():
+                        continue
+                    # find the col of the requested x-axis (e.g. samples)
+                    x_col = next(filter(lambda c: x_axis in c, run_data_mode.columns))
+                    # find the cols of the requested metric (e.g. mse) for all streams
+                    # TODO: fix capitalization
+                    data_cols = filter(
+                        lambda c: err in c and stream_name.lower() in c.lower(),
+                        run_data_mode.columns,
+                    )
+
+                    for col in data_cols:
+                        x_vals = np.array(run_data_mode[x_col])
+                        y_data = np.array(run_data_mode[col])
+
+                        plt.plot(
+                            x_vals,
+                            y_data,
+                            linestyle,
+                            color=colors[j % len(colors)],
+                            alpha=alpha,
+                        )
+                        legend_strs[-1] += [
+                            ("R" if runs_active[j] else "X")
+                            + " : "
+                            + run_data.run_id
+                            + " : "
+                            + runs_ids[run_data.run_id][1]
+                            + ": "
+                            + col
+                        ]
+
+                        # skip all-nan slices
+                        if (~np.isnan(y_data)).sum() > 0:
+                            min_val = np.min([min_val, np.nanmin(y_data)])
+                            max_val = np.max([max_val, np.nanmax(y_data)])
+
+        # TODO: ensure that legend is plotted with full opacity
+        legend_str = legend_strs[0]
+        if len(legend_str) < 1:
+            plt.close()
+            _logger.warning(f"Could not find any data for stream: {stream_name}")
+            continue
+
+        # no valid data found
+        if (min_val >= max_val) or np.isnan(min_val) or np.isnan(max_val):
+            continue
+
+        legend = plt.legend(legend_str, loc="upper right" if not x_scale_log else "lower left")
+        for line in legend.get_lines():
+            line.set(alpha=1.0)
+        plt.grid(True, which="both", ls="-")
+        plt.yscale("log")
+        # cap at 1.0 in case of divergence of run (through normalization, max should be around 1.0)
+        plt.ylim([0.95 * min_val, (None if max_val < 2.0 else min(1.1, 1.025 * max_val))])
+        if x_scale_log:
+            plt.xscale("log")
+        plt.title(stream_name)
+        plt.ylabel("loss")
+        plt.xlabel(x_axis if x_type == "step" else "rel.
time [h]") + plt.tight_layout() + rstr = "".join([f"{r}_" for r in runs_ids]) + + # save the plot + plt_fname = plot_dir / "{}{}{}.png".format( + rstr, "".join([f"{m}_" for m in modes]), stream_name + ) + _logger.info(f"Saving loss per stream plot to '{plt_fname}'") + plt.savefig(plt_fname) + plt.close() - # skip when no data is available - if run_data[idx][1].shape[0] == 0 : - continue - plt.plot( run_data[idx][1][:,x_idx], run_data[idx][1][:,data_idxs[i]], linestyle, - color=colors[j % len(colors)], alpha=alpha) - legend_strs[-1] += [col] +#################################################################################################### +def plot_loss_per_run( + modes: list[str], + run_id: str, + run_desc: str, + run_data: Metrics, + stream_names: list[str], + plot_dir: Path, + errs: list[str] | None = None, + x_axis: str = "samples", + x_scale_log: bool = False, +): + """ + Plot all stream_names (using matching to data columns) for given run_id - legend_str = legend_strs[0] - if len(legend_str) < 1 : + Parameters + ---------- + modes : list + list of modes for which losses are plotted (e.g. train, val) + run_id : str + ID of the training run to plot + run_desc : List[str] + Description of the training run + run_data : Metrics + Metrics object containing the training data + stream_names : list + list of stream names to plot + plot_dir : Path + directory to save the plots + errs : list + list of errors to plot (e.g. mse, stddev) + x_axis : str + x-axis strings used in the column names (options: "samples", "dtime") + x_scale_log : bool + whether to use log scale for x-axis + """ + if errs is None: + errs = ["mse"] + + plot_dir = Path(plot_dir) + + modes = [modes] if type(modes) is not list else modes + # repeat colors when train and val is plotted simultaneously + prop_cycle = plt.rcParams["axes.prop_cycle"] + colors = prop_cycle.by_key()["color"] + ["r", "g", "b", "k", "y", "m"] + + _fig = plt.figure(figsize=(10, 7), dpi=300) + + legend_strs = [] + for mode in modes: + legend_strs += [[]] + for err in errs: + linestyle = "-" if mode == "train" else ("--x" if len(modes) > 1 else "-x") + linestyle = ":" if "stddev" in err else linestyle + alpha = 1.0 + if "train" in modes and "val" in modes: + alpha = 0.35 if "train" in mode else alpha + run_data_mode = run_data.by_mode(mode) + + x_col = [c for _, c in enumerate(run_data_mode.columns) if x_axis in c][0] + # find the cols of the requested metric (e.g. 
mse) for all streams + data_cols = [c for _, c in enumerate(run_data_mode.columns) if err in c] + + data_cols = list(data_cols) + + for _, col in enumerate(data_cols): + for j, stream_name in enumerate(stream_names): + if stream_name.lower() in col.lower(): + # skip when no data is available + if run_data_mode[col].shape[0] == 0: + continue + + x_vals = np.array(run_data_mode[x_col]) + y_data = np.array(run_data_mode[col]) + + plt.plot( + x_vals, + y_data, + linestyle, + color=colors[j % len(colors)], + alpha=alpha, + ) + legend_strs[-1] += [col] + + legend_str = legend_strs[0] + if len(legend_str) < 1: + _logger.warning(f"Could not find any data for run: {run_id}") + plt.close() + return + + plt.title(run_id + " : " + run_desc[1]) + legend = plt.legend(legend_str, loc="lower left") + for line in legend.get_lines(): + line.set(alpha=1.0) + plt.yscale("log") + if x_scale_log: + plt.xscale("log") + plt.grid(True, which="both", ls="-") + plt.ylabel("loss") + plt.xlabel("samples") + plt.tight_layout() + sstr = "".join( + [f"{r}_".replace(",", "").replace("/", "_").replace(" ", "_") for r in legend_str] + ) + + # save the plot + plt_fname = plot_dir / "{}_{}{}.png".format(run_id, "".join([f"{m}_" for m in modes]), sstr) + _logger.info(f"Saving loss plot for {run_id}-run to '{plt_fname}'") + plt.savefig(plt_fname) plt.close() - return - - plt.title( run_id + ' : ' + run_desc[1]) - legend = plt.legend( legend_str, loc='lower left') - for line in legend.get_lines(): - line.set( alpha=1.0) - plt.yscale( 'log') - if x_scale_log : - plt.xscale( 'log') - plt.grid( True, which="both", ls="-") - plt.ylabel( 'loss'); plt.xlabel( 'samples'); plt.tight_layout() - sstr = ''.join([f'{r}_'.replace(',','').replace('/','_').replace(' ','_') for r in legend_str]) - plt.savefig( out_folder + '{}_{}{}.png'.format( run_id, ''.join( [f'{m}_' for m in modes]), sstr)) - plt.close() -#################################################################################################### -if __name__ == '__main__' : - - parser = argparse.ArgumentParser() - parser.add_argument( '-d', '--delete') - args = parser.parse_args() - - if args.delete == 'True' : - clean_out_folder() - - runs_ids = { - 'i18n6wmx' : [1, 'ERA5 test'], - } - - - runs_data = [TrainLogger.read( run_id) for run_id in runs_ids.keys()] - - # extract times and convert back to datetime objects, store absolute time ta and relative one tr - runs_times = [] - for rd in runs_data : - # training - if len(rd[1][1]) > 0 : - ta_train = pd.to_datetime( rd[0][1][:,0], format='%Y%m%d%H%M%S') - diff = (ta_train - ta_train[0]) - tr_train = diff.days * 24 + diff.seconds / 3600. - else : - ta_train, tr_train = [], [] - # validation - if len(rd[1][1]) > 0 : - ta_val = pd.to_datetime( rd[1][1][:,0], format='%Y%m%d%H%M%S') - diff = (ta_val - ta_train[0]) - tr_val = diff.days * 24 + diff.seconds / 3600. 
- else : - ta_val, tr_val = [], [] - runs_times.append( [[ta_train, tr_train], [ta_val, tr_val]]) - - - # determine which runs are still alive (as a process, though they might hang internally) - ret = subprocess.run(["squeue"], capture_output=True) - lines = str(ret.stdout).split( '\\n') - runs_active = [np.array([str(v[0]) in l for l in lines[1:]]).any() for v in runs_ids.values()] - - x_scale_log = False - x_type = 'rel_time', #'step' - x_type = 'step' - - # plot learning rate - plot_lr( runs_ids, runs_data, runs_active) - - # plot performance - plot_utilization( runs_ids, runs_data, runs_active) - - # TODO: finish - smoothing_size = 0 - if smoothing_size > 0 : - # smooth - x = np.linspace( -np.pi, np.pi, smoothing_size) - gauss_filter = np.exp( -np.square(x)) - for j in range(len(runs_data)) : - for i in range(2) : - if runs_data[j][i][1].shape[0] <= gauss_filter.shape[0] : - continue - for i_ch in range( runs_data[j][i][1].shape[-1]) : - if not ('mse' in runs_data[j][i][0][i_ch]) : - continue - res = np.convolve( runs_data[j][i][1][:,i_ch], gauss_filter, 'same') - code.interact( local=locals()) - runs_data[j][i][1][:(res.shape[0]-smoothing_size),i_ch] = res[:-(smoothing_size)] - - - # compare different runs - plot_loss_per_stream( ['train', 'val'], runs_ids, runs_data, runs_times, runs_active, - ['ERA5'], - x_type=x_type, x_scale_log=x_scale_log) - plot_loss_per_stream( ['val'], runs_ids, runs_data, runs_times, runs_active, - ['ERA5'], - x_type=x_type, x_scale_log=x_scale_log) - plot_loss_per_stream( ['train'], runs_ids, runs_data, runs_times, runs_active, - ['ERA5'], - x_type=x_type, x_scale_log=x_scale_log) - - # plot all cols for all run_ids - for run_id, run_data in zip( runs_ids, runs_data) : - plot_loss_per_run( ['train', 'val'], run_id, runs_ids[run_id], run_data, get_stream_names( run_id)) - # plot_loss_per_run( ['val'], run_id, runs_ids[run_id], run_data, get_stream_names( run_id)) +def plot_train(args=None): + # Example usage: + # When providing a YAML for configuring the run IDs: + # python plot_training.py -rf eval_run.yml -m ./trained_models -o ./training_plots + # When providing a string for configuring the run IDs: + # python plot_training.py -rs "{run_id: [job_id, experiment_name]}" + # -m ./trained_models -o ./training_plots + + logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s") + parser = argparse.ArgumentParser( + description="""Plot training diagnostics from logged data during training. + An example YAML file looks like this: + train: + plot: + run_id: + slurm_id : SLURM_JOB (specify 0 if not available) + description: job description + run_id: + slurm_id : SLURM_JOB (specify 0 if not available) + description : job description + ... 
+ + A dictionary-string can also be specified on the command line, e.g.: + "{'abcde': ['123456', 'experiment1'], + 'fghij': ['654321', 'experiment2']}" + """ + ) + + parser.add_argument( + "-o", "--output_dir", default="./plots/", type=Path, help="Directory where plots are saved" + ) + parser.add_argument( + "-m", + "--model_base_dir", + default=None, + type=Path, + help="Base-directory where models are saved", + ) + parser.add_argument( + "-d", + "--delete", + default=False, + action="store_true", + help="Delete all plots in the output directory before plotting", + ) + parser.add_argument( + "--streams", + "-s", + dest="streams", + default=["ERA5"], + type=str, + nargs="+", + help="List of streams to plot", + ) + parser.add_argument( + "--x_type", + "-x", + dest="x_type", + default="step", + type=str, + choices=["step", "reltime"], + help="Type of x-axis used in plots. Options: 'step' or 'reltime'", + ) + + run_id_group = parser.add_mutually_exclusive_group() + run_id_group.add_argument( + "-fd", + "--from_dict", + type=_read_str_config, + dest="fd", + help="Dictionary-string of form '{run_id: [job_id, experiment_name]}'" + + "for training runs to plot", + ) + + run_id_group.add_argument( + "-fy", + "--from_yaml", + dest="fy", + type=_read_yaml_config, + help="YAML file configuring the training run ids to plot", + ) + + # parse the command line arguments + args = parser.parse_args(args) + + model_base_dir = Path(args.model_base_dir) if args.model_base_dir else None + out_dir = Path(args.output_dir) + streams = list(args.streams) + x_types_valid = ["step"] # TODO: add "reltime" support when fix available + if args.x_type not in x_types_valid: + raise ValueError(f"x_type must be one of {x_types_valid}, but got {args.x_type}") + + # Post-processing default logic for config from YAML-file + if args.fd is None and args.fy is None: + if DEFAULT_RUN_FILE.exists(): + args.fy = _read_yaml_config(DEFAULT_RUN_FILE) + else: + raise ValueError( + f"Please provide a run_id dictionary or a YAML file with run_ids, " + f"or create a default file at {DEFAULT_RUN_FILE}." 
+            )
+
+    runs_ids = args.fd if args.fd is not None else args.fy
+
+    # --delete is a store_true flag, so args.delete is a bool (comparing to the
+    # string "True" would never match)
+    if args.delete:
+        clean_plot_folder(out_dir)
+
+    # read logged data
+
+    runs_data = [TrainLogger.read(run_id, model_path=model_base_dir) for run_id in runs_ids]
+
+    # determine which runs are still alive (as a process, though they might hang internally)
+    ret = subprocess.run(["squeue"], capture_output=True)
+    lines = str(ret.stdout).split("\\n")
+    runs_active = [
+        np.array([str(v[0]) in line for line in lines[1:]]).any() for v in runs_ids.values()
+    ]
+
+    x_scale_log = False
+
+    # plot learning rate
+    plot_lr(runs_ids, runs_data, runs_active, plot_dir=out_dir)
+
+    # # plot performance
+    # plot_utilization(runs_ids, runs_data, runs_active, plot_dir=out_dir)
+
+    # compare different runs
+    plot_loss_per_stream(
+        ["train", "val"],
+        runs_ids,
+        runs_data,
+        runs_active,
+        streams,
+        x_type=args.x_type,
+        x_scale_log=x_scale_log,
+        plot_dir=out_dir,
+    )
+    plot_loss_per_stream(
+        ["val"],
+        runs_ids,
+        runs_data,
+        runs_active,
+        streams,
+        x_type=args.x_type,
+        x_scale_log=x_scale_log,
+        plot_dir=out_dir,
+    )
+    plot_loss_per_stream(
+        ["train"],
+        runs_ids,
+        runs_data,
+        runs_active,
+        streams,
+        x_type=args.x_type,
+        x_scale_log=x_scale_log,
+        plot_dir=out_dir,
+    )
+
+    # plot all cols for all run_ids
+    for run_id, run_data in zip(runs_ids, runs_data, strict=False):
+        plot_loss_per_run(
+            ["train", "val"],
+            run_id,
+            runs_ids[run_id],
+            run_data,
+            get_stream_names(run_id, model_path=model_base_dir),  # limit to available streams
+            plot_dir=out_dir,
+        )
+        plot_loss_per_run(
+            ["val"],
+            run_id,
+            runs_ids[run_id],
+            run_data,
+            get_stream_names(run_id, model_path=model_base_dir),  # limit to available streams
+            plot_dir=out_dir,
+        )
+
+
+if __name__ == "__main__":
+    args = sys.argv[1:]  # get CLI args
+
+    plot_train(args)
diff --git a/src/weathergen/utils/run_id.py b/src/weathergen/utils/run_id.py
deleted file mode 100644
index f0f8519e2..000000000
--- a/src/weathergen/utils/run_id.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# (C) Copyright 2024 WeatherGenerator contributors.
-#
-# This software is licensed under the terms of the Apache Licence Version 2.0
-# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
-#
-# In applying this licence, ECMWF does not waive the privileges and immunities
-# granted to it by virtue of its status as an intergovernmental organisation
-# nor does it submit to any jurisdiction.
-
-from obslearn.train.utils import get_run_id
-
-if __name__ == '__main__' :
-    print( get_run_id())
\ No newline at end of file
diff --git a/src/weathergen/utils/train_logger.py b/src/weathergen/utils/train_logger.py
index dde2d1079..4281743a8 100644
--- a/src/weathergen/utils/train_logger.py
+++ b/src/weathergen/utils/train_logger.py
@@ -1,4 +1,4 @@
-# (C) Copyright 2024 WeatherGenerator contributors.
+# (C) Copyright 2025 WeatherGenerator contributors.
 #
 # This software is licensed under the terms of the Apache Licence Version 2.0
 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
@@ -7,136 +7,450 @@
 # granted to it by virtue of its status as an intergovernmental organisation
 # nor does it submit to any jurisdiction.
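+
+# Illustrative usage sketch of the metrics API defined below (the run id, model
+# path, and columns are placeholders; available columns depend on the run config):
+#
+#   metrics = TrainLogger.read("i18n6wmx", model_path="./models")
+#   train_df = metrics.by_mode("train")    # polars DataFrame, one row per log event
+#   val_df = metrics.by_mode("val")
+#   system_df = metrics.by_mode("system")  # GPU / memory utilization columns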
-import code -import torch -import numpy as np import datetime +import json +import logging +import math +import time +import traceback +from dataclasses import dataclass +from pathlib import Path +from typing import Literal + +import numpy as np +import polars as pl +from torch import Tensor + +import weathergen.common.config as config +from weathergen.utils.metrics import get_train_metrics_path, read_metrics_file + +_weathergen_timestamp = "weathergen.timestamp" +_weathergen_reltime = "weathergen.reltime" +_weathergen_time = "weathergen.time" +_performance_gpu = "perf.gpu" +_performance_memory = "perf.memory" + +_logger = logging.getLogger(__name__) + +Stage = Literal["train", "val"] +RunId = str + +# All the stages currently implemented: +TRAIN: Stage = "train" +VAL: Stage = "val" + + +@dataclass +class Metrics: + run_id: RunId + stage: Stage + train: pl.DataFrame + val: pl.DataFrame + system: pl.DataFrame + + def by_mode(self, s: str) -> pl.DataFrame: + match s: + case "train": + return self.train + case "val": + return self.val + case "system": + return self.system + case _: + raise ValueError(f"Unknown mode {s}. Use 'train', 'val' or 'system'.") + + +class TrainLogger: + ####################################### + def __init__(self, cf, path_run: Path) -> None: + self.cf = cf + self.path_run = path_run + + def log_metrics(self, stage: Stage, metrics: dict[str, float]) -> None: + """ + Log metrics to a file. + For now, just scalar values are expected. There is no check. + """ + ## Clean all the metrics to convert to float. + # Any other type (numpy etc.) will trigger a serialization error. + clean_metrics = { + _weathergen_timestamp: time.time_ns() // 1_000_000, + _weathergen_time: int(datetime.datetime.now().strftime("%Y%m%d%H%M%S")), + "stage": stage, + } + for key, value in metrics.items(): + v = float(value) + if math.isnan(v) or math.isinf(v): + v = str(v) + clean_metrics[key] = v + + # TODO: performance: we repeatedly open the file for each call. Better for multiprocessing + # but we can probably do better and rely for example on the logging module. 
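+
+        # One JSON object is appended per line; an illustrative record (values are
+        # made up, keys follow the module constants above plus the caller's metrics):
+        #   {"weathergen.timestamp": 1738259626000, "weathergen.time": 20250130183346,
+        #    "stage": "train", "loss_avg_mean": 0.42, "learning_rate": 0.0001}
+        # NaN/inf values are serialized as strings (e.g. "nan") and cast back on read.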
+ + metrics_path = get_train_metrics_path( + base_path=Path(self.cf.run_path), run_id=self.cf.run_id + ) + with open(metrics_path, "ab") as f: + s = json.dumps(clean_metrics) + "\n" + f.write(s.encode("utf-8")) + + ####################################### + def add_train( + self, + samples: int, + lr: float, + avg_loss: Tensor, + losses_all: dict[str, Tensor], + stddev_all: dict[str, Tensor], + perf_gpu: float = 0.0, + perf_mem: float = 0.0, + ) -> None: + """ + Log training data + """ + metrics: dict[str, float] = dict(num_samples=samples) + + log_vals: list[float] = [int(datetime.datetime.now().strftime("%Y%m%d%H%M%S"))] + log_vals += [samples] + + metrics["loss_avg_mean"] = avg_loss.nanmean().item() + metrics["learning_rate"] = lr + metrics["num_samples"] = int(samples) + log_vals += [avg_loss.nanmean().item()] + log_vals += [lr] + + for st in self.cf.streams: + loss = losses_all[st["name"]] + stddev = stddev_all[st["name"]] + + for j, (lf_name, _) in enumerate(self.cf.loss_fcts): + metrics[_key_loss(st["name"], lf_name)] = loss[:, :, j].nanmean().item() + + for k, ch_n in enumerate(st.train_target_channels): + metrics[_key_loss_chn(st["name"], lf_name, ch_n)] = ( + loss[:, k, j].nanmean().item() + ) + log_vals += [loss[:, :, j].nanmean().item()] + + metrics[_key_stddev(st["name"])] = stddev.nanmean().item() + + log_vals += [stddev.nanmean().item()] + + with open(self.path_run / f"{self.cf.run_id}_train_log.txt", "ab") as f: + np.savetxt(f, log_vals) + + log_vals = [] + log_vals += [perf_gpu] + log_vals += [perf_mem] + metrics[_performance_gpu] = perf_gpu + metrics[_performance_memory] = perf_mem + self.log_metrics("train", metrics) + with open(self.path_run / (self.cf.run_id + "_perf_log.txt"), "ab") as f: + np.savetxt(f, log_vals) + + ####################################### + def add_val( + self, samples: int, losses_all: dict[str, Tensor], stddev_all: dict[str, Tensor] + ) -> None: + """ + Log validation data + """ + + metrics: dict[str, float] = dict(num_samples=int(samples)) + + log_vals: list[float] = [int(datetime.datetime.now().strftime("%Y%m%d%H%M%S"))] + log_vals += [samples] + + for st in self.cf.streams: + loss = losses_all[st["name"]] + stddev = stddev_all[st["name"]] + for j, (lf_name, _) in enumerate(self.cf.loss_fcts_val): + metrics[_key_loss(st["name"], lf_name)] = loss[:, :, j].nanmean().item() + for k, ch_n in enumerate(st.val_target_channels): + metrics[_key_loss_chn(st["name"], lf_name, ch_n)] = ( + loss[:, k, j].nanmean().item() + ) + log_vals += [loss[:, :, j].nanmean().item()] + + metrics[_key_stddev(st["name"])] = stddev.nanmean().item() + log_vals += [stddev.nanmean().item()] + + self.log_metrics("val", metrics) + with open(self.path_run / (self.cf.run_id + "_val_log.txt"), "ab") as f: + np.savetxt(f, log_vals) + + ####################################### + @staticmethod + def read(run_id: str, model_path: str = None, mini_epoch: int = -1) -> Metrics: + """ + Read data for run_id + """ + # Load config from given model_path if provided, otherwise use path from private config + if model_path: + cf = config.load_model_config( + run_id=run_id, mini_epoch=mini_epoch, model_path=model_path + ) + else: + cf = config.load_config(private_home=None, from_run_id=run_id, mini_epoch=mini_epoch) + run_id = cf.run_id + + result_dir_base = Path(cf.run_path) + result_dir = result_dir_base / run_id + fname_log_train = result_dir / f"{run_id}_train_log.txt" + fname_log_val = result_dir / f"{run_id}_val_log.txt" + fname_perf_val = result_dir / f"{run_id}_perf_log.txt" + + # training 
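+
+        # Note: both the legacy plain-text logs (parsed below with np.loadtxt) and the
+        # JSON-lines metrics (parsed via read_metrics) are read; only the polars frames
+        # returned by read_metrics end up in the Metrics object returned at the end.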
+ + # define cols for training + cols_train = ["dtime", "samples", "mse", "lr"] + cols1 = [_weathergen_timestamp, "num_samples", "loss_avg_mean", "learning_rate"] + for si in cf.streams: + for lf in cf.loss_fcts: + cols1 += [_key_loss(si["name"], lf[0])] + cols_train += [ + si["name"].replace(",", "").replace("/", "_").replace(" ", "_") + ", " + lf[0] + ] + with_stddev = [("stats" in lf) for lf in cf.loss_fcts] + if with_stddev: + for si in cf.streams: + cols1 += [_key_stddev(si["name"])] + cols_train += [ + si["name"].replace(",", "").replace("/", "_").replace(" ", "_") + + ", " + + "stddev" + ] + # read training log data + try: + with open(fname_log_train, "rb") as f: + log_train = np.loadtxt(f, delimiter=",") + log_train = log_train.reshape((log_train.shape[0] // len(cols_train), len(cols_train))) + except ( + TypeError, + AttributeError, + IndexError, + ZeroDivisionError, + ValueError, + ) as e: + _logger.warning( + ( + f"Warning: no training data loaded for run_id={run_id}", + "Data loading or reshaping failed — " + "possible format, dimension, or logic issue.", + f"Due to specific error: {e}", + ) + ) + except (FileNotFoundError, PermissionError, OSError) as e: + _logger.error( + ( + f"Error: no training data loaded for run_id={run_id}", + "File system error occurred while handling the log file.", + f"Due to specific error: {e}", + ) + ) + except Exception: + _logger.error( + ( + f"Error: no training data loaded for run_id={run_id}", + f"Due to exception with trace:\n{traceback.format_exc()}", + ) + ) + log_train = np.array([]) + + log_train_df = read_metrics(cf, run_id, "train", cols1, result_dir_base) + + # validation + # define cols for validation + cols_val = ["dtime", "samples"] + cols2 = [_weathergen_timestamp, "num_samples"] + for si in cf.streams: + for lf in cf.loss_fcts_val: + cols_val += [ + si["name"].replace(",", "").replace("/", "_").replace(" ", "_") + ", " + lf[0] + ] + cols2 += [_key_loss(si["name"], lf[0])] + with_stddev = [("stats" in lf) for lf in cf.loss_fcts_val] + if with_stddev: + for si in cf.streams: + cols2 += [_key_stddev(si["name"])] + cols_val += [ + si["name"].replace(",", "").replace("/", "_").replace(" ", "_") + + ", " + + "stddev" + ] + # read validation log data + try: + with open(fname_log_val, "rb") as f: + log_val = np.loadtxt(f, delimiter=",") + log_val = log_val.reshape((log_val.shape[0] // len(cols_val), len(cols_val))) + except ( + TypeError, + AttributeError, + IndexError, + ZeroDivisionError, + ValueError, + ) as e: + _logger.warning( + ( + f"Warning: no validation data loaded for run_id={run_id}", + "Data loading or reshaping failed — " + "possible format, dimension, or logic issue.", + f"Due to specific error: {e}", + ) + ) + except (FileNotFoundError, PermissionError, OSError) as e: + _logger.error( + ( + f"Error: no validation data loaded for run_id={run_id}", + "File system error occurred while handling the log file.", + f"Due to specific error: {e}", + ) + ) + except Exception: + _logger.error( + ( + f"Error: no validation data loaded for run_id={run_id}", + f"Due to exception with trace:\n{traceback.format_exc()}", + ) + ) + log_val = np.array([]) + metrics_val_df = read_metrics(cf, run_id, "val", cols2, result_dir_base) + + # performance + # define cols for performance monitoring + cols_perf = ["GPU", "memory"] + # read perf log data + try: + with open(fname_perf_val, "rb") as f: + log_perf = np.loadtxt(f, delimiter=",") + log_perf = log_perf.reshape((log_perf.shape[0] // len(cols_perf), len(cols_perf))) + except ( + TypeError, + 
AttributeError,
+            IndexError,
+            ZeroDivisionError,
+            ValueError,
+        ) as e:
+            _logger.warning(
+                (
+                    f"Warning: no performance data loaded for run_id={run_id}",
+                    "Data loading or reshaping failed — "
+                    "possible format, dimension, or logic issue.",
+                    f"Due to specific error: {e}",
+                )
+            )
+        except (FileNotFoundError, PermissionError, OSError) as e:
+            _logger.error(
+                (
+                    f"Error: no performance data loaded for run_id={run_id}",
+                    "File system error occurred while handling the log file.",
+                    f"Due to specific error: {e}",
+                )
+            )
+        except Exception:
+            _logger.error(
+                (
+                    f"Error: no performance data loaded for run_id={run_id}",
+                    f"Due to exception with trace:\n{traceback.format_exc()}",
+                )
+            )
+            log_perf = np.array([])
+        metrics_system_df = read_metrics(
+            cf,
+            run_id,
+            None,
+            [_weathergen_timestamp, _performance_gpu, _performance_memory],
+            result_dir_base,
+        )
+
+        return Metrics(run_id, "train", log_train_df, metrics_val_df, metrics_system_df)
+
+
+def read_metrics(
+    cf: config.Config,
+    run_id: RunId | None,
+    stage: Stage | None,
+    cols: list[str] | None,
+    results_path: Path,
+) -> pl.DataFrame:
+    """
+    Read metrics for run_id
+
+    stage: stage to load ("train" or "val"). If None, all stages are loaded.
+    cols: list of columns to load. If None, all columns are loaded.
+    run_id: run_id to load. If None, the run_id from the config is used.
+    """
+
+    assert cols is None or cols, "cols must be non-empty or None"
+    if run_id is None:
+        run_id = cf.run_id
+    assert run_id, "run_id must be provided"
+
+    metrics_path = get_train_metrics_path(base_path=results_path, run_id=run_id)
+    # TODO: this should be a config option
+    df = read_metrics_file(metrics_path)
+    if stage is not None:
+        df = df.filter(pl.col("stage") == stage)
+        df = df.drop("stage")
+    df = clean_df(df, cols)
+    return df
+
+
+def clean_df(df, columns: list[str] | None):
+    """
+    Selects the required data from the dataframe, and ensures that all columns are numeric.
+    """
+    # Convert all string columns to float; a column has type str when it contained nan/inf values
+    for k, v in df.schema.items():
+        if v == pl.String:
+            df = df.with_columns(df[k].cast(pl.Float64).alias(k))
+
+    # Convert timestamp column to date
+    df = df.with_columns(
+        pl.from_epoch(df[_weathergen_timestamp], time_unit="ms").alias(_weathergen_timestamp)
+    )
+    df = df.with_columns(
+        (df[_weathergen_timestamp] - df[_weathergen_timestamp].min()).alias(_weathergen_reltime)
+    )
+
+    if columns:
+        columns = list(set(columns))  # remove duplicates
+        # Backwards compatibility of "loss_avg_mean" (old) and "loss_avg_0_mean" (new) metric name
+        if "loss_avg_mean" not in df.columns:
+            idcs = [i for i in range(len(columns)) if columns[i] == "loss_avg_mean"]
+            if len(idcs) > 0:
+                columns[idcs[0]] = "loss_avg_0_mean"
+        df = df.select(columns)
+        # Remove all rows where all columns are null
+        df = df.filter(~pl.all_horizontal(pl.col(c).is_null() for c in columns))
+
+    return df
+
+
+def clean_name(s: str) -> str:
+    """
+    Remove all characters from a string except letters, digits, and underscores.
+
+    Args:
+        s (str): The input string.
+
+    Returns:
+        str: A new string containing only alphanumeric characters and underscores,
+        in the same order and capitalization as they appeared in the input.
+ """ + return "".join(c for c in s if c.isalnum() or c == "_") + + +def _key_loss(st_name: str, lf_name: str) -> str: + st_name = clean_name(st_name) + return f"stream.{st_name}.loss_{lf_name}.loss_avg" + + +def _key_loss_chn(st_name: str, lf_name: str, ch_name: str) -> str: + st_name = clean_name(st_name) + return f"stream.{st_name}.loss_{lf_name}.loss_{ch_name}" + -from weathergen.utils.config import Config - - -class TrainLogger : - - ####################################### - def __init__( self, cf, path_run) -> None: - - self.cf = cf - self.path_run = path_run - # TODO: add header with col names (loadtxt has an option to skip k header lines) - - ####################################### - def add_train( self, samples, lr, loss_avg, stddev_avg, perf_gpu=0., perf_mem=0.) -> None : - ''' - Log training data - ''' - - log_vals = [ int(datetime.datetime.now().strftime( '%Y%m%d%H%M%S')) ] - log_vals += [samples] - - log_vals += [loss_avg[0].mean()] - log_vals += [lr] - - for i_obs, rt in enumerate( self.cf.streams) : - for j,_ in enumerate( self.cf.loss_fcts) : - log_vals += [ loss_avg[j,i_obs] ] - if len(stddev_avg) > 0 : - for i_obs, rt in enumerate( self.cf.streams) : - log_vals += [ stddev_avg[i_obs] ] - - with open( self.path_run + self.cf.run_id + '_train_log.txt', 'ab') as f: - np.savetxt( f, log_vals) - - log_vals = [] - log_vals += [perf_gpu] - log_vals += [perf_mem] - with open( self.path_run + self.cf.run_id + '_perf_log.txt', 'ab') as f: - np.savetxt( f, log_vals) - - ####################################### - def add_val( self, samples, loss_avg, stddev_avg) -> None : - ''' - Log validation data - ''' - - log_vals = [ int(datetime.datetime.now().strftime( '%Y%m%d%H%M%S')) ] - log_vals += [samples] - - for i_obs, rt in enumerate( self.cf.streams) : - for j,_ in enumerate( self.cf.loss_fcts) : - log_vals += [ loss_avg[j,i_obs] ] - if len(stddev_avg) > 0 : - for i_obs, rt in enumerate( self.cf.streams) : - log_vals += [ stddev_avg[i_obs] ] - - with open( self.path_run + self.cf.run_id + '_val_log.txt', 'ab') as f: - np.savetxt( f, log_vals) - - ####################################### - @staticmethod - def read( run_id, epoch=-1) : - ''' - Read data for run_id - ''' - - cf = Config.load( run_id, epoch) - run_id = cf.run_id - - fname_log_train = f'./results/{run_id}/{run_id}_train_log.txt' - fname_log_val = f'./results/{run_id}/{run_id}_val_log.txt' - fname_perf_val = f'./results/{run_id}/{run_id}_perf_log.txt' - fname_config = f'./models/model_{run_id}.json' - - # training - - # define cols for training - cols_train = ['dtime', 'samples', 'mse', 'lr'] - for si in cf.streams : - for j,lf in enumerate(cf.loss_fcts) : - cols_train += [ si['name'].replace(',','').replace('/','_').replace(' ','_') + ', ' + lf[0]] - with_stddev = [(True if 'stats' in lf else False) for lf in cf.loss_fcts] - if with_stddev : - for si in cf.streams : - cols_train += [ si['name'].replace(',','').replace('/','_').replace(' ','_')+', '+ 'stddev'] - # read training log data - try : - with open( fname_log_train, 'rb') as f: - log_train = np.loadtxt( f, delimiter=',') - log_train = log_train.reshape( ( log_train.shape[0]//len(cols_train), len(cols_train) )) - except : - print( f'Warning: no training data loaded for run_id={run_id}') - log_train = np.array([]) - - # validation - - # define cols for validation - cols_val = ['dtime', 'samples'] - for si in cf.streams : - for j,lf in enumerate(cf.loss_fcts_val) : - cols_val += [ si['name'].replace(',','').replace('/','_').replace(' ','_') + ', ' + lf[0]] - with_stddev 
= [(True if 'stats' in lf else False) for lf in cf.loss_fcts_val]
-    if with_stddev :
-      for si in cf.streams :
-        cols_val += [ si['name'].replace(',','').replace('/','_').replace(' ','_')+', '+ 'stddev']
-    # read validation log data
-    try :
-      with open( fname_log_val, 'rb') as f:
-        log_val = np.loadtxt( f, delimiter=',')
-      log_val = log_val.reshape( ( log_val.shape[0]//len(cols_val), len(cols_val) ))
-    except :
-      print( f'Warning: no validation data loaded for run_id={run_id}')
-      log_val = np.array([])
-
-    # performance
-
-    # define cols for performance monitoring
-    cols_perf = ['GPU', 'memory']
-    # read perf log data
-    try :
-      with open( fname_perf_val, 'rb') as f:
-        log_perf = np.loadtxt( f, delimiter=',')
-      log_perf = log_perf.reshape( ( log_perf.shape[0]//len(cols_perf), len(cols_perf) ))
-    except :
-      print( f'Warning: no performance data loaded for run_id={run_id}')
-      log_perf = np.array([])
-
-    return ( (cols_train, log_train), (cols_val, log_val), (cols_perf, log_perf) )
+def _key_stddev(st_name: str) -> str:
+    st_name = clean_name(st_name)
+    return f"stream.{st_name}.stddev_avg"
diff --git a/src/weathergen/utils/utils.py b/src/weathergen/utils/utils.py
new file mode 100644
index 000000000..5deba9287
--- /dev/null
+++ b/src/weathergen/utils/utils.py
@@ -0,0 +1,26 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+import torch
+
+
+def get_dtype(value: str) -> torch.dtype:
+    """
+    Convert the config value to a torch dtype
+    """
+    if value == "bf16":
+        return torch.bfloat16
+    elif value == "fp16":
+        return torch.float16
+    elif value == "fp32":
+        return torch.float32
+    else:
+        raise NotImplementedError(
+            f"Dtype {value} is not recognized, choose either bf16, fp16, or fp32"
+        )
diff --git a/src/weathergen/utils/validation_io.py b/src/weathergen/utils/validation_io.py
index 2cd858d48..355be0e51 100644
--- a/src/weathergen/utils/validation_io.py
+++ b/src/weathergen/utils/validation_io.py
@@ -1,4 +1,4 @@
-# (C) Copyright 2024 WeatherGenerator contributors.
+# (C) Copyright 2025 WeatherGenerator contributors.
 #
 # This software is licensed under the terms of the Apache Licence Version 2.0
 # which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
@@ -7,122 +7,80 @@
 # granted to it by virtue of its status as an intergovernmental organisation
 # nor does it submit to any jurisdiction.
-import time - -import numpy as np -import torch - -import zarr - - -def sanitize_stream_str( istr) : - return istr.replace( ' ', '_').replace( '-', '_').replace(',','') - -################################# -def read_validation( cf, epoch, base_path, instruments, forecast_steps, rank=0) : - - streams, columns, data = [], [], [] - - fname = base_path + 'validation_epoch{:05d}_rank{:04d}.zarr'.format( epoch, rank) - store = zarr.DirectoryStore( fname) - ds = zarr.group( store=store) - - for ii, stream_info in enumerate( cf.streams) : - - n = stream_info['name'] - if len(instruments) : - if not np.array( [r in n for r in instruments]).any() : - continue - - streams += [ stream_info['name'] ] - columns.append( ds[f'{sanitize_stream_str(n)}/0'].attrs['cols']) - data += [ [] ] - - for fstep in forecast_steps : - - data[-1] += [ [] ] - istr = sanitize_stream_str(n) - - data[-1][-1].append( ds[f'{istr}/{fstep}/sources']) - data[-1][-1].append( ds[f'{istr}/{fstep}/sources_coords']) - data[-1][-1].append( ds[f'{istr}/{fstep}/preds']) - data[-1][-1].append( ds[f'{istr}/{fstep}/targets']) - data[-1][-1].append( ds[f'{istr}/{fstep}/targets_coords']) - data[-1][-1].append( ds[f'{istr}/{fstep}/sources_lens']) - data[-1][-1].append( ds[f'{istr}/{fstep}/targets_lens']) - data[-1][-1].append( ~np.isnan( data[-1][-1][3]) ) - - data[-1][-1].append( np.mean( data[-1][-1][2], axis=1) ) - data[-1][-1].append( np.std( data[-1][-1][2], axis=1) ) - - return streams, columns, data - -################################# -def write_validation( cf, base_path, rank, epoch, cols, - sources, preds_all, targets_all, - targets_coords_all, targets_lens, jac = None) : - - if 0==len(cf.analysis_streams_output) : - return - - fname = base_path + 'validation_epoch{:05d}_rank{:04d}'.format( epoch, rank) - fname += '' if jac is None else '_jac' - fname += '.zarr' - - store = zarr.DirectoryStore( fname) - ds = zarr.group( store=store) - - for k, si in enumerate( cf.streams) : - - # only store requested streams - if not np.array([s in si['name'] for s in cf.analysis_streams_output]).any() : - continue - - # skip empty entries (e.g. 
no channels from the sources are used as targets)
-      continue
-
-    # TODO: this only saves the first batch
-    source_k = sources[0][k].cpu().detach().numpy()
-    source_lens_k = np.array([source_k.shape[0]])
-    preds_k = torch.cat( preds_all[k], 1).transpose( 1, 0).cpu().detach().numpy()
-    targets_k = torch.cat( targets_all[k], 0).cpu().detach().numpy()
-    targets_coords_k = torch.cat( targets_coords_all[k], 0).cpu().detach().numpy()
-    targets_lens_k = np.array(targets_lens[k], dtype=np.int64)
-
-    fs = cf.forecast_steps
-    fs = fs if type(fs)==int else fs[ min( epoch, len(fs)-1)]
-    rn = si['name'].replace( ' ', '_').replace( '-', '_').replace(',','')
-
-    write_first = False
-    if rn in ds.group_keys() :
-      if f'{fs}' not in ds[rn].group_keys() :
-        write_first = True
-    else :
-      write_first = True
-
-    # TODO: how to avoid this
-    if write_first :
-      ds_source = ds.require_group( f'{rn}/{fs}')
-      cols_obsvalues = [col[:9]=='obsvalue_' for col in cols[k]]
-      ds_source.attrs['cols'] = np.array(cols[k])[cols_obsvalues].tolist()
-      ds_source.create_dataset( 'sources', data=source_k, chunks=(1024, *source_k.shape[1:]))
-      ds_source.create_dataset( 'sources_lens', data=source_lens_k)
-      ds_source.create_dataset( 'preds', data=preds_k, chunks=(1024, *preds_k.shape[1:]))
-      ds_source.create_dataset( 'targets', data=targets_k, chunks=(1024, *targets_k.shape[1:]))
-      ds_source.create_dataset( 'targets_coords', data=targets_coords_k,
-                                chunks=(1024, *targets_coords_k.shape[1:]))
-      ds_source.create_dataset( 'targets_lens', data=targets_lens_k)
-    else :
-      rn = rn + f'/{fs}'
-      ds[f'{rn}/sources'].append( source_k)
-      ds[f'{rn}/sources_lens'].append( source_lens_k)
-      ds[f'{rn}/preds'].append( preds_k)
-      ds[f'{rn}/targets'].append( targets_k)
-      ds[f'{rn}/targets_coords'].append( targets_coords_k)
-      ds[f'{rn}/targets_lens'].append( targets_lens_k)
-
-    if jac is not None :
-      ds_source.create_dataset( 'jacobian', data=jac[k])
-
-  store.close()
+import logging
+
+import weathergen.common.config as config
+import weathergen.common.io as io
+from weathergen.common.io import TimeRange
+from weathergen.datasets.data_reader_base import TimeWindowHandler, str_to_datetime64
+
+_logger = logging.getLogger(__name__)
+
+
+def write_output(
+    cf,
+    mini_epoch,
+    batch_idx,
+    sources,
+    preds_all,
+    targets_all,
+    targets_coords_all,
+    targets_times_all,
+    targets_lens,
+    sample_idxs,
+):
+    stream_names = [stream.name for stream in cf.streams]
+    analysis_streams_output = cf.get("analysis_streams_output", None)
+    if cf.streams_output is not None:
+        output_stream_names = cf.streams_output
+    elif analysis_streams_output is not None:  # --- to be removed at some point ---
+        output_stream_names = analysis_streams_output  # --- to be removed at some point ---
+    else:
+        output_stream_names = None
+
+    if output_stream_names is None:
+        output_stream_names = stream_names
+
+    output_streams = {name: stream_names.index(name) for name in output_stream_names}
+
+    _logger.debug(f"Using output streams: {output_streams} from streams: {stream_names}")
+
+    target_channels: list[list[str]] = [list(stream.val_target_channels) for stream in cf.streams]
+    source_channels: list[list[str]] = [list(stream.val_source_channels) for stream in cf.streams]
+
+    geoinfo_channels = [[] for _ in cf.streams]  # TODO obtain channels
+
+    # assume: batch size is guaranteed to be constant
+    # => calculate global sample indices for this batch by offsetting by sample_start
+    sample_start = batch_idx * 
cf.batch_size_validation_per_gpu + + assert len(stream_names) == len(targets_all[0]), "data does not match number of streams" + assert len(stream_names) == len(preds_all[0]), "data does not match number of streams" + assert len(stream_names) == len(sources[0]), "data does not match number of streams" + + start_date = str_to_datetime64(cf.start_date_val) + end_date = str_to_datetime64(cf.end_date_val) + + twh = TimeWindowHandler(start_date, end_date, cf.len_hrs, cf.step_hrs) + source_windows = (twh.window(idx) for idx in sample_idxs) + source_intervals = [TimeRange(window.start, window.end) for window in source_windows] + + data = io.OutputBatchData( + sources, + source_intervals, + targets_all, + preds_all, + targets_coords_all, + targets_times_all, + targets_lens, + output_streams, + target_channels, + source_channels, + geoinfo_channels, + sample_start, + cf.forecast_offset, + ) + + with io.ZarrIO(config.get_path_output(cf, mini_epoch)) as writer: + for subset in data.items(): + writer.write_zarr(subset) diff --git a/stac/abi-goes16.jsonnet b/stac/abi-goes16.jsonnet new file mode 100644 index 000000000..ce4af2822 --- /dev/null +++ b/stac/abi-goes16.jsonnet @@ -0,0 +1,90 @@ +local common = import 'common.jsonnet'; + +{ + name: 'ABI-GOES16', + filename: 'abigoes.json', + description: 'The Advanced Baseline Imager (ABI) on GOES16 (Geostationary Operational Environmental Satellite 16) provides high-resolution, multispectral imagery for real-time weather monitoring and forecasting. GOES-16 is also called GOES-East as it covers the Eastern side of the Americas.', + title: 'ABI-GOES16', + unique_id: '13', + start_datetime: '2017-12-17T00:00:41', + end_datetime: '2024-12-31 23:50:20', + frequency: '10min', + fixed_timesteps: 'True', + keywords: [ + 'atmosphere', + 'observation', + 'geostationary', + 'satellite', + ], + providers: [ + common.providers.nasa, + common.providers.nasa_processor + ], + processing_level: 'Operational L1b', + + variables: { + names: [ + 'time_bounds_swaths', + 'time_bounds_rows', + 'a_h_NRTH', + 'b_h_NRTH', + 'Rad', + 'DQF', + 't', + 'time_bounds', + 'goes_imager_projection', + 'kappa0', + 'band_id', + 'band_wavelength', + 'min_radiance_value_of_valid_pixels', + 'max_radiance_value_of_valid_pixels', + 'mean_radiance_value_of_valid_pixels', + 'std_dev_radiance_value_of_valid_pixels', + 'esun', + 'earth_sun_distance_anomaly_in_AU', + 'nominal_satellite_height', + 'nominal_satellite_subpoint_lat', + 'nominal_satellite_subpoint_lon', + 'x_image', + 'y_image', + 'geospatial_lat_lon_extent', + 'yaw_flip_flag', + 'x_image_bounds', + 'y_image_bounds', + 'planck_bc1', + 'planck_bc2', + 'planck_fk1', + 'planck_fk2', + 'focal_plane_temperature_threshold_decreasing', + 'focal_plane_temperature_threshold_increasing', + 'focal_plane_temperature_threshold_exceeded_count', + 'percent_uncorrectable_L0_errors', + 'saturated_pixel_count', + 'undersaturated_pixel_count', + 'valid_pixel_count', + 'maximum_focal_plane_temperature', + 'algorithm_dynamic_input_data_container', + 'algorithm_product_version_container', + 'reprocessing_version', + 'missing_pixel_count', + 'processing_parm_version_container', + 't_star_look', + 'star_id', + 'band_wavelength_star_look', + 'y', + 'x', + ] + }, + + geometry: [-156, 6, -81, 81], + + dataset: { + dataset_name: 'RP_ABI-L1b-RadF-M6C*_G16_s*_e*_c*.nc', + type: 'application/vnd+netcdf', + description: 'Observation dataset', + locations: [common.hpc.hpc2020], + size: '230.8 TB', + inodes: '5.426.532', + roles: ['data'], + }, +} diff --git 
a/stac/cerra.jsonnet b/stac/cerra.jsonnet new file mode 100644 index 000000000..2daa3bf0c --- /dev/null +++ b/stac/cerra.jsonnet @@ -0,0 +1,676 @@ +local common = import 'common.jsonnet'; + +{ + name: 'CERRA', + filename: 'cerra.json', + description: 'The Copernicus European Regional Reanalysis (CERRA), developed under the Copernicus Climate Change Service (C3S), provides a high-resolution reanalysis of atmospheric conditions over Europe. Covering the period from 1984 onward, CERRA delivers hourly data at a spatial resolution of 5.5 km, capturing fine-scale climate and weather patterns with improved detail compared to global reanalyses', + title: 'CERRA', + unique_id: '2', + start_datetime: '1984-09-01T06:00:00', + end_datetime: '2020-12-31T18:00:00', + frequency: '3h', + fixed_timesteps: 'True', + keywords: [ + 'europe', + 'copernicus', + 'atmosphere', + 'reanalysis', + ], + providers: [ + common.providers.copernicus, + ], + processing_level: 'NA', + + // retrieved from anemoi dataset with: + // ds.variables, ds.statistics["minimum"], ds.statistics["maximum"], + // ds.statistics["mean"], ds.statistics["stdev"], + // ds.statistics_tendencies()["mean"], ds.statistics_tendencies()["stdev"] + variables: { + names: [ + '10si_10', + '10wdir_10', + '2t_2', + 'al', + 'cos_julian_day', + 'cos_latitude', + 'cos_local_time', + 'cos_longitude', + 'cos_solar_zenith_angle', + 'lsm', + 'msl_0', + 'orog', + 'r_100', + 'r_1000', + 'r_150', + 'r_200', + 'r_250', + 'r_300', + 'r_400', + 'r_50', + 'r_500', + 'r_600', + 'r_700', + 'r_850', + 'r_925', + 'rsn', + 'sde', + 'sf', + 'sin_julian_day', + 'sin_latitude', + 'sin_local_time', + 'sin_longitude', + 'skt', + 'sp', + 't_100', + 't_1000', + 't_150', + 't_200', + 't_250', + 't_300', + 't_400', + 't_50', + 't_500', + 't_600', + 't_700', + 't_850', + 't_925', + 'tciwv_0', + 'tp', + 'u_100', + 'u_1000', + 'u_150', + 'u_200', + 'u_250', + 'u_300', + 'u_400', + 'u_50', + 'u_500', + 'u_600', + 'u_700', + 'u_850', + 'u_925', + 'v_100', + 'v_1000', + 'v_150', + 'v_200', + 'v_250', + 'v_300', + 'v_400', + 'v_50', + 'v_500', + 'v_600', + 'v_700', + 'v_850', + 'v_925', + 'z_100', + 'z_1000', + 'z_150', + 'z_200', + 'z_250', + 'z_300', + 'z_400', + 'z_50', + 'z_500', + 'z_600', + 'z_700', + 'z_850', + 'z_925', + ], + mins: [ + 5.62394189e-06, + 1.27694761e-08, + 2.02712051e+02, + 5.20827882e-02, + -9.99997675e-01, + 2.52968043e-01, + -1.00000000e+00, + 2.73873925e-01, + 0.00000000e+00, + 0.00000000e+00, + 9.15235391e+04, + -4.11015381e+02, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 1.00133286e+02, + 0.00000000e+00, + 0.00000000e+00, + -9.99999404e-01, + 3.46809298e-01, + -1.00000000e+00, + -8.49018574e-01, + 1.97412720e+02, + 5.93108477e+04, + 1.83557007e+02, + 2.24763779e+02, + 1.86210373e+02, + 1.89312103e+02, + 1.92907486e+02, + 1.97630112e+02, + 2.11168442e+02, + 1.78602402e+02, + 2.17516159e+02, + 2.20507004e+02, + 2.17648209e+02, + 2.27576126e+02, + 2.26398041e+02, + -1.05852914e+00, + 0.00000000e+00, + -4.29200745e+01, + -5.09635544e+01, + -4.77315025e+01, + -6.18216667e+01, + -7.43816376e+01, + -7.24793777e+01, + -6.86115417e+01, + -4.32194443e+01, + -5.69152908e+01, + -5.55516205e+01, + -6.62634048e+01, + -6.92024841e+01, + -6.35123405e+01, + -6.45962982e+01, + -4.42999763e+01, + -7.65900269e+01, + -1.01852310e+02, + -1.15096771e+02, + -1.05506874e+02, + -9.44697800e+01, 
+ -6.79569397e+01, + -8.08676300e+01, + -6.62030029e+01, + -5.87583351e+01, + -6.22382965e+01, + -6.17014198e+01, + 1.43262250e+05, + -7.09842969e+03, + 1.19476250e+05, + 1.02385188e+05, + 8.90707109e+04, + 7.78865625e+04, + 5.96469258e+04, + 1.82196641e+05, + 4.51202656e+04, + 3.21100137e+04, + 2.06955391e+04, + 5.83286133e+03, + -8.44327942e+02, + ], + maxs: [ + 5.28027840e+01, + 3.60003632e+02, + 3.24446747e+02, + 8.49857926e-01, + 1.00000000e+00, + 9.37935650e-01, + 1.00000000e+00, + 1.00000000e+00, + 9.99949932e-01, + 1.00000000e+00, + 1.06743219e+05, + 4.00135962e+03, + 1.00070312e+02, + 1.00378998e+02, + 1.00111328e+02, + 1.00169922e+02, + 1.00207031e+02, + 1.00232422e+02, + 1.00261719e+02, + 1.00009766e+02, + 1.00271484e+02, + 1.00214844e+02, + 1.00298828e+02, + 1.00373047e+02, + 1.00378601e+02, + 3.00001953e+02, + 1.26113281e+02, + 1.15117188e+02, + 9.99999404e-01, + 9.67474639e-01, + 1.00000000e+00, + 9.61765587e-01, + 3.45043610e+02, + 1.08649039e+05, + 2.41005920e+02, + 3.22723755e+02, + 2.44259048e+02, + 2.47739609e+02, + 2.47744934e+02, + 2.52012115e+02, + 2.66412720e+02, + 2.55196594e+02, + 2.77656647e+02, + 2.85742432e+02, + 2.93582123e+02, + 3.10354004e+02, + 3.17671417e+02, + 7.01758041e+01, + 3.06929688e+02, + 7.56467590e+01, + 4.68188019e+01, + 9.53927917e+01, + 1.04401718e+02, + 1.22580872e+02, + 1.22861374e+02, + 1.02492699e+02, + 9.02089539e+01, + 8.59620056e+01, + 7.54451904e+01, + 6.29366837e+01, + 6.07016602e+01, + 5.81062088e+01, + 5.84863396e+01, + 4.31917763e+01, + 6.98955231e+01, + 8.45281906e+01, + 9.00723572e+01, + 8.62032928e+01, + 8.62836914e+01, + 6.68381729e+01, + 7.12501221e+01, + 6.05944405e+01, + 6.52927170e+01, + 7.25929108e+01, + 5.90667572e+01, + 1.66283281e+05, + 4.81913867e+03, + 1.42371547e+05, + 1.24159750e+05, + 1.09216508e+05, + 9.65778672e+04, + 7.57898047e+04, + 2.08567656e+05, + 5.91258125e+04, + 4.49169297e+04, + 3.25831230e+04, + 1.72189570e+04, + 1.06431680e+04, + ], + means: [ + 5.31759671e+00, + 1.82890160e+02, + 2.84196743e+02, + 2.77168156e-01, + 4.19128363e-03, + 6.54402856e-01, + -1.83814930e-05, + 8.95160329e-01, + 2.12955304e-01, + 5.28147071e-01, + 1.01390159e+05, + 2.36831323e+02, + 7.65249460e+00, + 6.88474084e+01, + 1.15043052e+01, + 3.04358233e+01, + 4.85260954e+01, + 5.42775885e+01, + 5.06893779e+01, + 3.24156859e+00, + 4.63506773e+01, + 4.53308558e+01, + 4.75729489e+01, + 5.97874802e+01, + 6.60394944e+01, + 2.54934229e+02, + 2.94238224e-01, + 7.88061774e-02, + -7.11292635e-03, + 7.14476879e-01, + -2.58335021e-06, + 1.25806572e-01, + 2.85014068e+02, + 9.86660974e+04, + 2.12625405e+02, + 2.85540276e+02, + 2.15987936e+02, + 2.17948093e+02, + 2.21596750e+02, + 2.28078829e+02, + 2.42183185e+02, + 2.13494397e+02, + 2.53749521e+02, + 2.62790043e+02, + 2.70028266e+02, + 2.78380529e+02, + 2.81994323e+02, + 1.45715313e+01, + 4.98549761e-01, + 1.10172523e+01, + 4.95077156e-01, + 1.48218923e+01, + 1.59973408e+01, + 1.52368319e+01, + 1.35926784e+01, + 1.05703051e+01, + 5.08724646e+00, + 8.32905866e+00, + 6.53706673e+00, + 4.84091003e+00, + 2.39136158e+00, + 1.27459015e+00, + 3.37035780e-01, + -3.78458694e-01, + 3.93264300e-01, + -7.59636770e-02, + -2.60289022e-01, + -2.08126776e-01, + -1.14076301e-01, + -5.43919038e-01, + -5.06507083e-02, + 4.03868651e-03, + -4.99432825e-03, + -1.60883061e-01, + -2.83507263e-01, + 1.58887157e+05, + 1.12681738e+03, + 1.33961297e+05, + 1.16038921e+05, + 1.01985456e+05, + 9.02313634e+04, + 7.08293732e+04, + 2.01164886e+05, + 5.49396190e+04, + 4.14130278e+04, + 2.96109251e+04, + 1.42985714e+04, + 
7.48170813e+03, + ], + stds: [ + 3.87283445e+00, + 1.08693885e+02, + 1.18319326e+01, + 1.56108630e-01, + 7.06383117e-01, + 1.82534982e-01, + 7.07102156e-01, + 1.20628899e-01, + 2.88874245e-01, + 4.92187354e-01, + 1.04959814e+03, + 4.44184576e+02, + 9.14751969e+00, + 2.43637737e+01, + 1.53793390e+01, + 2.99507868e+01, + 3.29562310e+01, + 3.21014008e+01, + 3.19738902e+01, + 4.80240931e+00, + 3.14678410e+01, + 3.07615251e+01, + 3.02296049e+01, + 2.87729841e+01, + 2.72051279e+01, + 5.93082153e+01, + 1.52393381e+00, + 5.32859602e-01, + 7.07781522e-01, + 1.67214121e-01, + 7.07111329e-01, + 4.10255106e-01, + 1.30434820e+01, + 5.02809074e+03, + 7.79938350e+00, + 1.13431393e+01, + 6.36824752e+00, + 6.19094897e+00, + 5.99604953e+00, + 7.00611611e+00, + 8.32486820e+00, + 6.75944639e+00, + 8.60986963e+00, + 8.80495869e+00, + 9.34410520e+00, + 1.05441258e+01, + 1.10803401e+01, + 7.84970218e+00, + 1.56107664e+00, + 9.74984406e+00, + 5.39346384e+00, + 1.25551630e+01, + 1.52662887e+01, + 1.64396910e+01, + 1.58321058e+01, + 1.29832669e+01, + 1.09722277e+01, + 1.06112426e+01, + 9.01701956e+00, + 7.96988058e+00, + 7.27661423e+00, + 7.20762131e+00, + 8.17763444e+00, + 5.13146518e+00, + 1.09739614e+01, + 1.42203580e+01, + 1.61905336e+01, + 1.58914613e+01, + 1.30126200e+01, + 6.83899013e+00, + 1.04819549e+01, + 8.76972390e+00, + 7.55421992e+00, + 6.60250432e+00, + 6.61816997e+00, + 3.40446521e+03, + 8.37777559e+02, + 3.60846845e+03, + 3.67988647e+03, + 3.56398125e+03, + 3.30669629e+03, + 2.71472707e+03, + 3.75196899e+03, + 2.20511845e+03, + 1.78624260e+03, + 1.43159290e+03, + 1.01390711e+03, + 8.87882842e+02, + ], + + tendencies: + { + means: [ + 3.73359176e-05, + 1.53003880e-04, + -1.57682356e-04, + 1.10361919e-06, + 3.05707151e-05, + 0.00000000e+00, + -1.57987933e-05, + 0.00000000e+00, + 1.27543485e-06, + 0.00000000e+00, + 4.18306886e-03, + 0.00000000e+00, + -5.07987426e-05, + -9.45053428e-05, + -8.04828690e-05, + -7.58013795e-05, + -2.44649757e-04, + 3.22629915e-05, + 3.05392256e-04, + 7.46403539e-05, + 2.27136061e-04, + 1.36577072e-04, + 3.53202385e-05, + 2.54396604e-06, + 2.57827061e-05, + 8.70623933e-01, + 2.94759151e-06, + 2.05215403e-06, + 1.76501840e-05, + 0.00000000e+00, + -2.09657059e-05, + 0.00000000e+00, + -1.35452830e-04, + 2.24807897e-03, + -3.98069811e-05, + -1.77578449e-04, + -1.66562239e-05, + -7.67689382e-05, + -1.06749330e-04, + -1.62427007e-04, + -2.09020236e-04, + -1.75744664e-04, + -2.03609455e-04, + -1.90130268e-04, + -1.82794576e-04, + -1.87010692e-04, + -1.87340424e-04, + -1.82020072e-04, + 6.49393589e-06, + 2.38088364e-04, + 2.27066292e-05, + 1.70577506e-04, + 1.46354452e-04, + 1.39709981e-04, + 1.24773021e-04, + 8.68914372e-05, + 3.55996837e-04, + 6.07905214e-05, + 3.83965117e-05, + 3.10262205e-05, + 4.55265680e-05, + 4.26895054e-05, + 3.94497449e-05, + 8.55346719e-06, + 3.90140812e-05, + 4.86486918e-05, + 4.57528329e-05, + 4.37222001e-05, + 3.87266170e-05, + 2.36740288e-05, + 2.15531982e-05, + 1.12621165e-05, + 7.80421483e-06, + 2.03458447e-05, + 3.02641600e-05, + -8.28586934e-02, + 2.90386250e-03, + -8.08715652e-02, + -7.71640391e-02, + -7.12370145e-02, + -6.42698963e-02, + -4.84719166e-02, + -1.05326459e-01, + -3.50242425e-02, + -2.46443367e-02, + -1.62618574e-02, + -5.76892575e-03, + -1.02279581e-03, + ], + stds: [ + 2.07685463e+00, + 1.01122612e+02, + 4.19497587e+00, + 1.64013741e-02, + 3.04392616e-03, + 0.00000000e+00, + 9.99998113e-01, + 0.00000000e+00, + 4.00329659e-01, + 0.00000000e+00, + 2.39407536e+02, + 0.00000000e+00, + 3.42704841e+00, + 1.25886723e+01, + 6.23322478e+00, 
+ 1.51787961e+01, + 2.11777602e+01, + 2.48992107e+01, + 2.67461468e+01, + 1.61529075e+00, + 2.56105297e+01, + 2.40228208e+01, + 2.19554083e+01, + 1.68110926e+01, + 1.33198475e+01, + 6.03804667e+00, + 2.09967896e-02, + 4.60778539e-01, + 3.04401875e-03, + 0.00000000e+00, + 1.00000183e+00, + 0.00000000e+00, + 7.54929403e+00, + 2.28067952e+02, + 1.03927376e+00, + 2.60402653e+00, + 1.29251273e+00, + 1.84726254e+00, + 1.66534712e+00, + 1.34330410e+00, + 1.46095691e+00, + 1.13585368e+00, + 1.48680256e+00, + 1.39619037e+00, + 1.34424100e+00, + 1.55029113e+00, + 1.96402880e+00, + 2.83750469e+00, + 1.58056991e+00, + 2.73521494e+00, + 2.79204942e+00, + 3.14688684e+00, + 4.52547258e+00, + 6.09056640e+00, + 6.55962560e+00, + 5.52760714e+00, + 2.84083354e+00, + 4.32335952e+00, + 3.64057506e+00, + 3.35146399e+00, + 3.43432976e+00, + 3.60909873e+00, + 2.95625245e+00, + 2.97918696e+00, + 3.49322326e+00, + 5.23493343e+00, + 7.18543450e+00, + 7.74271041e+00, + 6.46823068e+00, + 3.10549576e+00, + 4.95936117e+00, + 4.05305627e+00, + 3.62234726e+00, + 3.64438659e+00, + 3.82290060e+00, + 1.80578498e+02, + 1.89932026e+02, + 2.11981366e+02, + 2.71028445e+02, + 3.26270280e+02, + 3.38568440e+02, + 2.90777053e+02, + 1.92108853e+02, + 2.33160327e+02, + 1.93620113e+02, + 1.72713747e+02, + 1.69112109e+02, + 1.77388321e+02, + ], + }, + }, + + geometry: [-15, 32, 20, 60], + + dataset: { + dataset_name: 'cerra-rr-an-oper-0001-mars-5p5km-1984-2020-6h-v2-hmsi.zarr', + type: 'application/vnd+zarr', + description: 'Anemoi dataset', + locations: [common.hpc.hpc2020, common.hpc.ewc, common.hpc.marenostrum5, common.hpc.leonardo], + size: '9 TB', + inodes: '53,192', + roles: ['data'], + }, +} diff --git a/stac/common.jsonnet b/stac/common.jsonnet new file mode 100644 index 000000000..8e7984e14 --- /dev/null +++ b/stac/common.jsonnet @@ -0,0 +1,62 @@ +// Variable filler + +{ + providers: { + copernicus: { + name: 'Copernicus', + roles: ['provider'], + url: 'https://copernicus.eu', + }, + ecmwf_host: { + name: 'ECMWF', + roles: ['host'], + url: 'https://ecmwf.int', + }, + ecmwf_provider: { + name: 'ECMWF', + roles: ['provider'], + url: 'https://www.ecmwf.int/', + }, + nasa: { + name: 'NASA', + roles: ['provider'], + url: 'https://www.nasa.gov', + }, + nasa_processor: { + name: 'NASA', + roles: ['processor'], + url: 'https://www.nasa.gov', + }, + eumetsat: { + name: 'EUMETSAT', + roles: ['provider'], + url: 'https://eumetsat.int', + }, + eumetsat_processor: { + name: 'EUMETSAT', + roles: ['processor'], + url: 'https://eumetsat.int', + }, + cma: { + name: 'CMA', + roles: ['provider'], + url: 'https://www.cma.gov.cn/', + }, + awi: { + name: 'AWI', + roles: ['provider'], + url: 'https://www.awi.de', + }, + }, + + hpc: { + leonardo: 'leonardo', + hpc2020: 'hpc2020', + lumi: 'lumi', + ewc: 'European Weather Cloud', + marenostrum5: 'marenostrum5', + jsc: 'juwels_booster', + levante: 'levante', + alps: 'alps', + }, +} diff --git a/stac/era5_v8.jsonnet b/stac/era5_v8.jsonnet new file mode 100644 index 000000000..ffa42deec --- /dev/null +++ b/stac/era5_v8.jsonnet @@ -0,0 +1,768 @@ +local common = import 'common.jsonnet'; + +{ + name: 'ERA5v8', + filename: 'era5v8.json', + description: 'ERA5 is a reanalysis dataset produced by ECMWF, providing hourly estimates of a large number of atmospheric, land, and oceanic climate variables.', + unique_id: '1', + title: 'ERA5', + start_datetime: '1979-01-01T00:00:00', + end_datetime: '2023-12-31T18:00:00', + frequency: '6h', + fixed_timesteps: 'True', + keywords: [ + 'ERA5', + 'global', + 'atmosphere', + 
'reanalysis', + ], + providers: [ + common.providers.ecmwf_provider, + ], + processing_level: 'NA', + + + // retrieved from anemoi dataset with: + // ds.variables, ds.statistics["minimum"], ds.statistics["maximum"], + // ds.statistics["mean"], ds.statistics["stdev"], + // ds.statistics_tendencies()["mean"], ds.statistics_tendencies()["stdev"] + variables: { + names: [ + '10u', + '10v', + '2d', + '2t', + 'cos_julian_day', + 'cos_latitude', + 'cos_local_time', + 'cos_longitude', + 'cp', + 'insolation', + 'lsm', + 'msl', + 'q_100', + 'q_1000', + 'q_150', + 'q_200', + 'q_250', + 'q_300', + 'q_400', + 'q_50', + 'q_500', + 'q_600', + 'q_700', + 'q_850', + 'q_925', + 'sdor', + 'sin_julian_day', + 'sin_latitude', + 'sin_local_time', + 'sin_longitude', + 'skt', + 'slor', + 'sp', + 't_100', + 't_1000', + 't_150', + 't_200', + 't_250', + 't_300', + 't_400', + 't_50', + 't_500', + 't_600', + 't_700', + 't_850', + 't_925', + 'tcw', + 'tp', + 'u_100', + 'u_1000', + 'u_150', + 'u_200', + 'u_250', + 'u_300', + 'u_400', + 'u_50', + 'u_500', + 'u_600', + 'u_700', + 'u_850', + 'u_925', + 'v_100', + 'v_1000', + 'v_150', + 'v_200', + 'v_250', + 'v_300', + 'v_400', + 'v_50', + 'v_500', + 'v_600', + 'v_700', + 'v_850', + 'v_925', + 'w_100', + 'w_1000', + 'w_150', + 'w_200', + 'w_250', + 'w_300', + 'w_400', + 'w_50', + 'w_500', + 'w_600', + 'w_700', + 'w_850', + 'w_925', + 'z', + 'z_100', + 'z_1000', + 'z_150', + 'z_200', + 'z_250', + 'z_300', + 'z_400', + 'z_50', + 'z_500', + 'z_600', + 'z_700', + 'z_850', + 'z_925', + ], + + mins: [ + -4.49394379e+01, + -4.43153229e+01, + 1.86626816e+02, + 1.89908325e+02, + -9.99997675e-01, + 1.24922609e-02, + -1.00000000e+00, + -1.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 9.11235625e+04, + -1.17661999e-04, + 9.99999727e-09, + -9.88260290e-05, + -8.83662142e-05, + -2.48111784e-04, + -1.64085635e-04, + -8.22196016e-05, + 1.04556477e-07, + -5.47191739e-05, + 9.99999372e-09, + 7.25790272e-09, + 9.99999372e-09, + 9.99999727e-09, + 0.00000000e+00, + -9.99999404e-01, + -9.99921978e-01, + -1.00000000e+00, + -1.00000000e+00, + 1.88418503e+02, + 9.99999756e-05, + 4.95389492e+04, + 1.71877274e+02, + 2.13323837e+02, + 1.77818359e+02, + 1.83676529e+02, + 1.91708237e+02, + 1.97768753e+02, + 2.08886047e+02, + 1.72232498e+02, + 2.12267517e+02, + 1.99463104e+02, + 2.04115372e+02, + 2.12136658e+02, + 2.15690491e+02, + 3.56401764e-02, + 0.00000000e+00, + -6.58096771e+01, + -4.13815918e+01, + -6.07784424e+01, + -7.25727692e+01, + -8.16486359e+01, + -8.60549774e+01, + -7.65379639e+01, + -7.53735199e+01, + -6.22264099e+01, + -5.90408783e+01, + -7.12684021e+01, + -7.99715424e+01, + -6.80020447e+01, + -6.73240356e+01, + -4.24148102e+01, + -8.48845673e+01, + -1.02374771e+02, + -9.87489014e+01, + -9.63981628e+01, + -8.96564941e+01, + -8.13757935e+01, + -7.60469055e+01, + -6.72691040e+01, + -6.33320618e+01, + -6.38844452e+01, + -6.28929443e+01, + -2.28956413e+00, + -6.49004745e+00, + -5.56071281e+00, + -8.00729465e+00, + -9.83796406e+00, + -1.09255714e+01, + -1.21863241e+01, + -1.15801334e+00, + -1.17948818e+01, + -1.09547596e+01, + -1.11764212e+01, + -7.03121376e+00, + -4.17296028e+00, + -9.86007812e+02, + 1.38013812e+05, + -7.26114453e+03, + 1.15293500e+05, + 9.86913125e+04, + 8.54609375e+04, + 7.44556250e+04, + 5.66994102e+04, + 1.75447312e+05, + 4.24197812e+04, + 3.02291484e+04, + 1.94835781e+04, + 5.28037891e+03, + -1.12285962e+03, + ], + maxs: [ + 3.72930298e+01, + 8.63477325e+01, + 3.06452454e+02, + 3.25734100e+02, + 1.00000000e+00, + 9.99966681e-01, + 1.00000000e+00, 
+ 1.00000000e+00, + 1.18124008e-01, + 1.00000000e+00, + 1.00000000e+00, + 1.07650750e+05, + 5.66915151e-05, + 3.16821858e-02, + 1.03373837e-04, + 8.15930020e-04, + 2.10092240e-03, + 3.59533937e-03, + 6.77897036e-03, + 8.32692967e-06, + 1.03576845e-02, + 1.69278271e-02, + 1.85309686e-02, + 2.31868401e-02, + 2.70082690e-02, + 8.80149109e+02, + 9.99999404e-01, + 9.99921978e-01, + 1.00000000e+00, + 1.00000000e+00, + 3.45493652e+02, + 1.45110607e-01, + 1.06965758e+05, + 2.48841278e+02, + 3.26945618e+02, + 2.42687973e+02, + 2.43511917e+02, + 2.48826477e+02, + 2.58807434e+02, + 2.71492371e+02, + 2.60852417e+02, + 2.85387695e+02, + 2.93532135e+02, + 3.04730499e+02, + 3.15522308e+02, + 3.21813843e+02, + 1.43308609e+02, + 3.50006104e-01, + 8.54967499e+01, + 3.70482483e+01, + 1.13082031e+02, + 1.22952728e+02, + 1.26730515e+02, + 1.19997391e+02, + 1.06650055e+02, + 9.95386200e+01, + 8.68177795e+01, + 7.62345123e+01, + 6.38558655e+01, + 6.03621979e+01, + 6.04451294e+01, + 6.16262054e+01, + 6.25923920e+01, + 7.25314484e+01, + 9.56968994e+01, + 1.04253052e+02, + 1.01470810e+02, + 9.17555237e+01, + 8.20102692e+01, + 7.95196533e+01, + 6.66145477e+01, + 6.49055176e+01, + 6.55298767e+01, + 6.24255676e+01, + 1.66524982e+00, + 1.26513729e+01, + 1.93596268e+00, + 2.69087315e+00, + 3.68252563e+00, + 3.82894421e+00, + 5.23805428e+00, + 1.10882187e+00, + 6.82522392e+00, + 6.70955753e+00, + 7.45905399e+00, + 5.95300388e+00, + 7.25452614e+00, + 5.42842305e+04, + 1.66822312e+05, + 5.21706152e+03, + 1.42851938e+05, + 1.24576438e+05, + 1.09547250e+05, + 9.69046250e+04, + 7.61650938e+04, + 2.08846250e+05, + 5.93146523e+04, + 4.50826523e+04, + 3.27713906e+04, + 1.73604727e+04, + 1.08912666e+04, + ], + means: [ + -5.60749475e-01, + 2.16951956e-01, + 2.83074613e+02, + 2.88060911e+02, + 3.25883146e-05, + 7.96621946e-01, + 0.00000000e+00, + 1.60837930e-09, + 4.18763506e-04, + 2.53287901e-01, + 2.84039355e-01, + 1.01148725e+05, + 2.68075919e-06, + 9.76336466e-03, + 6.68699968e-06, + 2.74203524e-05, + 8.16892759e-05, + 1.77448841e-04, + 5.24215976e-04, + 2.67142152e-06, + 1.15930954e-03, + 2.10069525e-03, + 3.32639294e-03, + 6.30045406e-03, + 8.35132330e-03, + 2.07250326e+01, + 4.48531858e-06, + -2.42203001e-08, + 0.00000000e+00, + -1.56107403e-09, + 2.88847380e+02, + 3.42588576e-03, + 9.85629074e+04, + 2.04008150e+02, + 2.89000440e+02, + 2.11043088e+02, + 2.18624965e+02, + 2.26077882e+02, + 2.33901357e+02, + 2.48179685e+02, + 2.11070116e+02, + 2.59120714e+02, + 2.67414790e+02, + 2.74241964e+02, + 2.81749376e+02, + 2.84769679e+02, + 2.54860339e+01, + 7.47761617e-04, + 9.31935607e+00, + -6.10089665e-01, + 1.39091506e+01, + 1.47393783e+01, + 1.35931992e+01, + 1.17896220e+01, + 8.44398933e+00, + 2.78063579e+00, + 6.00363410e+00, + 4.20194875e+00, + 2.72961537e+00, + 7.14865127e-01, + -1.01380186e-01, + 1.60062288e-02, + 2.24596071e-01, + -7.57511644e-02, + -9.23143477e-02, + -5.92486906e-02, + -3.12920186e-02, + -1.98557965e-02, + 2.38855940e-04, + -4.02617999e-02, + -5.74038364e-02, + -2.59664728e-02, + 9.74758989e-02, + 2.15186084e-01, + -3.25729084e-05, + 8.61880292e-03, + -9.62705570e-05, + -2.01293988e-04, + -2.99631109e-04, + -3.42865093e-04, + -3.50322252e-04, + -1.60037270e-05, + -3.80018234e-04, + -2.58807966e-04, + 4.65371716e-04, + 4.17299677e-03, + 7.04067491e-03, + 2.27727262e+03, + 1.59892212e+05, + 9.44661315e+02, + 1.35806647e+05, + 1.18073332e+05, + 1.03842624e+05, + 9.18109458e+04, + 7.19081810e+04, + 2.00988907e+05, + 5.56483413e+04, + 4.18532647e+04, + 2.98460866e+04, + 1.42908018e+04, + 7.38562995e+03, + ], + 
stds: [ + 5.39871352e+00, + 4.46878268e+00, + 1.53113976e+01, + 1.55242635e+01, + 7.07118299e-01, + 2.27453437e-01, + 7.07106781e-01, + 7.07106781e-01, + 1.29410740e-03, + 3.25979869e-01, + 4.40217572e-01, + 1.08019190e+03, + 6.27908921e-07, + 5.85692778e-03, + 4.18538917e-06, + 2.57587317e-05, + 8.62411258e-05, + 1.96058764e-04, + 5.93969497e-04, + 2.62138246e-07, + 1.26020895e-03, + 2.01901437e-03, + 2.83037560e-03, + 4.28120351e-03, + 5.06488497e-03, + 6.17107951e+01, + 7.07095264e-01, + 5.60052148e-01, + 7.07106781e-01, + 7.07106781e-01, + 1.63905900e+01, + 1.00039460e-02, + 6.80231309e+03, + 1.14263171e+01, + 1.34094147e+01, + 7.37700694e+00, + 5.20307297e+00, + 7.26399260e+00, + 9.36899522e+00, + 1.08657374e+01, + 7.44429969e+00, + 1.09151352e+01, + 1.08897090e+01, + 1.15140449e+01, + 1.23537782e+01, + 1.27668321e+01, + 1.71599970e+01, + 2.35457201e-03, + 1.42119444e+01, + 6.01012568e+00, + 1.74459859e+01, + 1.88347039e+01, + 1.85336560e+01, + 1.72240905e+01, + 1.42377588e+01, + 1.46445933e+01, + 1.19041239e+01, + 1.02519572e+01, + 9.07703638e+00, + 8.03400039e+00, + 7.73874067e+00, + 6.94677982e+00, + 4.98552782e+00, + 9.60032826e+00, + 1.17983303e+01, + 1.27098636e+01, + 1.22402160e+01, + 1.00717113e+01, + 5.51239347e+00, + 8.20669586e+00, + 6.97722363e+00, + 6.15659845e+00, + 5.65278018e+00, + 5.96197142e+00, + 2.39639905e-02, + 8.70846363e-02, + 5.12940051e-02, + 8.28576756e-02, + 1.12645184e-01, + 1.39414009e-01, + 1.76097918e-01, + 1.11556009e-02, + 1.88950943e-01, + 1.91547566e-01, + 1.93147395e-01, + 1.76087048e-01, + 1.39203029e-01, + 6.17140115e+03, + 4.10248992e+03, + 8.72026283e+02, + 4.70580041e+03, + 4.81948160e+03, + 4.58648868e+03, + 4.19597611e+03, + 3.38525828e+03, + 3.84755364e+03, + 2.71336157e+03, + 2.17200503e+03, + 1.71967757e+03, + 1.17848557e+03, + 9.88623015e+02, + ], + tendencies: + { + means: [ + 6.45020048e-07, + -3.91051211e-07, + 9.13015241e-06, + 1.73589231e-05, + -6.03205261e-10, + 0.00000000e+00, + -2.62108975e-14, + 0.00000000e+00, + 7.23818945e-10, + 6.85583124e-11, + 0.00000000e+00, + 5.34478794e-04, + 3.77797237e-13, + -1.27874185e-09, + 1.25180212e-12, + 1.11469638e-11, + 8.66099970e-11, + 2.53691253e-10, + 1.13475749e-09, + -7.96527452e-13, + 2.19395998e-09, + 1.53576808e-09, + 3.45403133e-10, + 3.28029380e-09, + 2.09156140e-09, + 0.00000000e+00, + 1.40169700e-07, + 0.00000000e+00, + -5.08799775e-14, + 0.00000000e+00, + 1.66917017e-05, + 0.00000000e+00, + 6.76560681e-04, + -1.07140727e-05, + 1.22826766e-05, + -3.51486749e-06, + -2.83154226e-06, + 1.71752648e-06, + 5.77977760e-06, + 7.47303617e-06, + -3.64523797e-05, + 6.83516543e-06, + 7.37813137e-06, + 8.16318098e-06, + 5.13682973e-06, + 8.21828006e-06, + 1.05561051e-05, + 9.61761858e-10, + -3.52539898e-05, + 7.22681451e-07, + -4.81434987e-05, + -3.87836163e-05, + -3.28518948e-05, + -3.08944239e-05, + -1.98054662e-05, + 1.45402531e-05, + -7.69965326e-06, + -4.68547632e-06, + 1.19672273e-07, + 2.07348946e-06, + 3.05564645e-06, + 2.96162666e-08, + -2.32419737e-07, + 1.05669550e-06, + 6.32369802e-06, + 5.80533947e-06, + 3.24771399e-06, + 5.86670193e-07, + 2.61542959e-06, + -5.75932631e-07, + -5.52731302e-06, + -4.60052765e-06, + -1.75671874e-06, + -8.73537254e-07, + 1.20689427e-10, + -9.70168044e-08, + 1.56170377e-09, + 2.80638622e-09, + 3.76853905e-09, + 3.83897756e-09, + 1.43899677e-09, + -1.13018981e-10, + 2.31637265e-09, + 3.42613588e-09, + -2.24547834e-09, + -2.58354341e-08, + -6.03037422e-08, + 0.00000000e+00, + 2.19608188e-03, + 4.38200078e-04, + 3.00449376e-03, + 3.26208310e-03, + 
3.32488946e-03,
+ 3.12307783e-03,
+ 2.52655393e-03,
+ -2.43672190e-03,
+ 2.06909686e-03,
+ 1.67868961e-03,
+ 1.31342488e-03,
+ 8.50353813e-04,
+ 6.67373786e-04,
+ ],
+ stds: [
+ 2.13540711e+00,
+ 2.39813424e+00,
+ 1.69495980e+00,
+ 2.77896570e+00,
+ 3.04096061e-03,
+ 0.00000000e+00,
+ 1.00000000e+00,
+ 0.00000000e+00,
+ 1.34885524e-03,
+ 4.68381930e-01,
+ 0.00000000e+00,
+ 2.51706184e+02,
+ 1.94620754e-07,
+ 8.52648219e-04,
+ 1.83800112e-06,
+ 1.23183321e-05,
+ 4.27647444e-05,
+ 8.98662418e-05,
+ 2.52863784e-04,
+ 3.62864342e-08,
+ 4.90193196e-04,
+ 7.24428518e-04,
+ 9.92347129e-04,
+ 1.31331721e-03,
+ 1.08039469e-03,
+ 0.00000000e+00,
+ 3.04709932e-03,
+ 0.00000000e+00,
+ 1.00000000e+00,
+ 0.00000000e+00,
+ 4.73075398e+00,
+ 0.00000000e+00,
+ 2.37874619e+02,
+ 1.15969953e+00,
+ 1.85240123e+00,
+ 1.10115310e+00,
+ 1.42252252e+00,
+ 1.33690200e+00,
+ 1.12096427e+00,
+ 1.21428941e+00,
+ 1.43999847e+00,
+ 1.26787362e+00,
+ 1.24246848e+00,
+ 1.22117762e+00,
+ 1.49259728e+00,
+ 1.66868120e+00,
+ 3.17785672e+00,
+ 2.31701188e-03,
+ 2.83352778e+00,
+ 2.24795232e+00,
+ 3.36455492e+00,
+ 4.23715712e+00,
+ 5.06342663e+00,
+ 5.25654164e+00,
+ 4.53961222e+00,
+ 2.74218728e+00,
+ 3.65652209e+00,
+ 3.09672817e+00,
+ 2.81688334e+00,
+ 2.83576680e+00,
+ 2.93921393e+00,
+ 3.16015963e+00,
+ 2.55840811e+00,
+ 3.76971944e+00,
+ 4.94318341e+00,
+ 6.10593642e+00,
+ 6.40017403e+00,
+ 5.51276539e+00,
+ 3.10712264e+00,
+ 4.39350303e+00,
+ 3.64020398e+00,
+ 3.22130334e+00,
+ 3.16803977e+00,
+ 3.34724482e+00,
+ 3.09858773e-02,
+ 6.93663575e-02,
+ 6.31448889e-02,
+ 9.66495665e-02,
+ 1.25784820e-01,
+ 1.55715626e-01,
+ 2.01741484e-01,
+ 1.53634126e-02,
+ 2.18826936e-01,
+ 2.25074095e-01,
+ 2.27583414e-01,
+ 2.00929690e-01,
+ 1.47015124e-01,
+ 0.00000000e+00,
+ 1.92599506e+02,
+ 2.02467895e+02,
+ 2.11414379e+02,
+ 2.48943247e+02,
+ 2.85799969e+02,
+ 2.95860432e+02,
+ 2.63736377e+02,
+ 2.13224520e+02,
+ 2.22459475e+02,
+ 1.94517805e+02,
+ 1.80754431e+02,
+ 1.81428163e+02,
+ 1.90652074e+02,
+ ],
+ },
+ },
+
+ geometry: [-180, 180, -90, 90],
+
+ dataset: {
+ dataset_name: 'aifs-ea-an-oper-0001-mars-o96-1979-2023-6h-v8.zarr',
+ type: 'application/vnd+zarr',
+ description: 'ERA5 data on the O96 HEALPix grid, version 8. Contains tendencies.',
+ locations: [common.hpc.hpc2020, common.hpc.ewc, common.hpc.jsc, common.hpc.marenostrum5, common.hpc.leonardo],
+ size: '593 GB',
+ inodes: '65,863',
+ roles: ['data'],
+ },
+}
diff --git a/stac/functions.libsonnet b/stac/functions.libsonnet
new file mode 100644
index 000000000..f409d2d84
--- /dev/null
+++ b/stac/functions.libsonnet
@@ -0,0 +1,129 @@
+// Variable filler
+
+local check_unique_ids(datasets) =
+  local ids = [ds.unique_id for ds in datasets];
+  local unique_ids = std.set(ids);
+  if std.length(unique_ids) != std.length(ids) then
+    error 'Duplicate in unique IDs: ' + std.join(', ', ids)
+  else
+    {};
+
+local check_variable_lengths(vars) =
+  if std.objectHas(vars, 'means') && std.length(vars.names) != std.length(vars.means) then
+    error 'lengths of variables and means do not match'
+  else if std.objectHas(vars, 'stds') && std.length(vars.names) != std.length(vars.stds) then
+    error 'lengths of variables and stds do not match'
+  else if std.objectHas(vars, 'mins') && std.length(vars.names) != std.length(vars.mins) then
+    error 'lengths of variables and mins do not match'
+  else if std.objectHas(vars, 'maxs') && std.length(vars.names) != std.length(vars.maxs) then
+    error 'lengths of variables and maxs do not match'
+  else if std.objectHas(vars, 'tendencies') &&
+          std.length(vars.tendencies.means) > 0 &&
+          std.length(vars.names) != std.length(vars.tendencies.means) then
+    error 'lengths of variables and tendencies (means) do not match'
+  else if std.objectHas(vars, 'tendencies') &&
+          std.length(vars.tendencies.stds) > 0 &&
+          std.length(vars.names) != std.length(vars.tendencies.stds) then
+    error 'lengths of variables and tendencies (stds) do not match'
+  else
+    {};
+
+local fill_variables(vars) =
+  check_variable_lengths(vars) +
+  {
+    [vars.names[k]]: {
+      min: (if std.objectHas(vars, 'mins') then vars.mins[k] else 'NA'),
+      max: (if std.objectHas(vars, 'maxs') then vars.maxs[k] else 'NA'),
+      mean: (if std.objectHas(vars, 'means') then vars.means[k] else 'NA'),
+      std: (if std.objectHas(vars, 'stds') then vars.stds[k] else 'NA'),
+      tendency_mean: (
+        if std.objectHas(vars, 'tendencies') && std.length(vars.tendencies.means) > 0
+        then vars.tendencies.means[k]
+        else 'NA'
+      ),
+      tendency_std: (
+        if std.objectHas(vars, 'tendencies') && std.length(vars.tendencies.stds) > 0
+        then vars.tendencies.stds[k]
+        else 'NA'
+      ),
+    }
+    for k in std.range(0, std.length(vars.names) - 1)
+  };
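+// Usage sketch (illustrative toy values, not taken from any catalogue
+// entry); any subset of the statistics arrays may be supplied:
+//
+//   local toy = { names: ['2t', 'msl'], mins: [190, 91000], maxs: [325, 108000] };
+//   fill_variables(toy)
+//   // -> { '2t': { min: 190, max: 325, mean: 'NA', std: 'NA',
+//   //              tendency_mean: 'NA', tendency_std: 'NA' },
+//   //      msl: { min: 91000, max: 108000, ... } }
+//
+// A names/statistics length mismatch is caught by check_variable_lengths
+// and aborts evaluation with the corresponding error message.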
+
+local fill_properties(ds) = {
+  name: ds.name,
+  description: ds.description,
+  unique_id: ds.unique_id,
+  title: ds.title,
+  start_datetime: ds.start_datetime,
+  end_datetime: ds.end_datetime,
+  keywords: ds.keywords,
+  providers: ds.providers,
+  variables: fill_variables(ds.variables),
+  frequency: ds.frequency,
+  fixed_timesteps: ds.fixed_timesteps,
+  processing_level: ds.processing_level,
+};
+
+local fill_geometry(vars) = {
+  type: 'Polygon',
+  coordinates: [
+    [
+      [vars[0], vars[2]],
+      [vars[0], vars[3]],
+      [vars[1], vars[3]],
+      [vars[1], vars[2]],
+      [vars[0], vars[2]],
+    ],
+  ],
+};
+
+local fill_assets(ds) = {
+  [ds.dataset_name]: {
+    title: ds.dataset_name,
+    href: ds.dataset_name,
+    type: ds.type,
+    roles: ds.roles,
+    description: ds.description,
+    locations: ds.locations,
+    size: ds.size,
+    inodes: ds.inodes,
+  },
+};
+
+// Optional: create catalogue link
+local dataset_entry_catalogue(ds, href_link) = {
+  rel: 'child',
+  href: href_link + ds.filename,
+  title: ds.title,
+  type: 'application/json',
+};
+
+// Create full STAC item for a dataset
+local dataset_entry_fill(ds) = {
+  type: 'Feature',
+  stac_version: '1.0.0',
+  id: 'weathergen.atmo.' + ds.name,
+  properties: fill_properties(ds),
+  geometry: fill_geometry(ds.geometry),
+  bbox: [ds.geometry[0], ds.geometry[2], ds.geometry[1], ds.geometry[3]],
+  stac_extensions: [
+    'https://stac-extensions.github.io/datacube/v2.2.0/schema.json',
+    'https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json',
+    'https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json',
+  ],
+  assets: fill_assets(ds.dataset),
+};
+
+{
+  check_unique_ids: check_unique_ids,
+  fill_variables: fill_variables,
+  fill_geometry: fill_geometry,
+  fill_assets: fill_assets,
+  dataset_entry_catalogue: dataset_entry_catalogue,
+  dataset_entry_fill: dataset_entry_fill,
+}
diff --git a/stac/fy3a.jsonnet b/stac/fy3a.jsonnet
new file mode 100644
index 000000000..04d186028
--- /dev/null
+++ b/stac/fy3a.jsonnet
@@ -0,0 +1,62 @@
+local common = import 'common.jsonnet';
+
+{
+  name: 'FY-3A, MWHS',
+  filename: 'fy3a.json',
+  description: 'Data from the MWHS microwave radiometer onboard FY-3A, a Fengyun satellite. Data is available for three FY-3 satellites: FY-3A, FY-3B and FY-3C.',
+  title: 'FY-3A, MWHS',
+  unique_id: '10',
+  start_datetime: '2008-07-01T00:19:46',
+  end_datetime: '2014-05-05T00:33:45',
+  frequency: 'NA',
+  fixed_timesteps: 'False',
+  keywords: [
+    'atmosphere',
+    'observation',
+    'polar-orbiter',
+    'satellite',
+  ],
+  providers: [
+    common.providers.ecmwf_host,
+    common.providers.cma,
+    common.providers.eumetsat_processor,
+  ],
+  processing_level: '1C',
+
+  variables: {
+    names: [
+      'quality_pixel_bitmask',
+      'instrtemp',
+      'scnlin',
+      'satellite_azimuth_angle',
+      'satellite_zenith_angle',
+      'solar_azimuth_angle',
+      'solar_zenith_angle',
+      'data_quality_bitmask',
+      'quality_scanline_bitmask',
+      'time',
+      'warmnedt',
+      'coldnedt',
+      'btemps',
+      'u_independent_btemps',
+      'u_structured_btemps',
+      'u_common_btemps',
+      'quality_issue_pixel_bitmask',
+    ],
+  },
+
+  geometry: [-180, 180, -90, 90],
+
+  dataset: {
+    dataset_name: 'MICROWAVE_FCDR_V1.1-20200512/FY3A/*/*.nc',
+    type: 'application/vnd+netcdf',
+    description: 'Observation dataset',
+    locations: [common.hpc.hpc2020],
+    size: '664.9 GB',
+    inodes: '31039',
+    roles: ['data'],
+  },
+}
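For orientation, a minimal sketch of how one of these entries flows through the helpers in functions.libsonnet, assuming evaluation with the jsonnet CLI from the stac/ directory (the output shape in the comments is abbreviated):

    local functions = import 'functions.libsonnet';
    local fy3a = import 'fy3a.jsonnet';

    {
      // [west, east, south, north] becomes a closed five-point GeoJSON ring
      geometry: functions.fill_geometry(fy3a.geometry),
      // the full STAC 'Feature' object that would be serialised to fy3a.json
      item: functions.dataset_entry_fill(fy3a),
    }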
diff --git a/stac/fy3b.jsonnet b/stac/fy3b.jsonnet
new file mode 100644
index 000000000..a8a48adae
--- /dev/null
+++ b/stac/fy3b.jsonnet
@@ -0,0 +1,61 @@
+local common = import 'common.jsonnet';
+
+{
+  name: 'FY-3B, MWHS',
+  filename: 'fy3b.json',
+  description: 'Data from the MWHS microwave radiometer onboard FY-3B, a Fengyun satellite. Data is available for three FY-3 satellites: FY-3A, FY-3B and FY-3C.',
+  title: 'FY-3B, MWHS',
+  unique_id: '11',
+  start_datetime: '2010-11-18T22:23:16',
+  end_datetime: '2018-12-31T22:29:55',
+  frequency: 'NA',
+  fixed_timesteps: 'False',
+  keywords: [
+    'atmosphere',
+    'observation',
+    'polar-orbiter',
+    'satellite',
+  ],
+  providers: [
+    common.providers.ecmwf_host,
+    common.providers.cma,
+    common.providers.eumetsat_processor,
+  ],
+  processing_level: '1C',
+
+  variables: {
+    names: [
+      'quality_pixel_bitmask',
+      'instrtemp',
+      'scnlin',
+      'satellite_azimuth_angle',
+      'satellite_zenith_angle',
+      'solar_azimuth_angle',
+      'solar_zenith_angle',
+      'data_quality_bitmask',
+      'quality_scanline_bitmask',
+      'time',
+      'warmnedt',
+      'coldnedt',
+      'btemps',
+      'u_independent_btemps',
+      'u_structured_btemps',
+      'u_common_btemps',
+      'quality_issue_pixel_bitmask',
+    ],
+  },
+
+  geometry: [-180, 180, -90, 90],
+
+  dataset: {
+    dataset_name: 'MICROWAVE_FCDR_V1.1-20200512/FY3B/*/*.nc',
+    type: 'application/vnd+netcdf',
+    description: 'Observation dataset',
+    locations: [common.hpc.hpc2020],
+    size: '961.4 GB',
+    inodes: '44204',
+    roles: ['data'],
+  },
+}
diff --git a/stac/fy3c.jsonnet b/stac/fy3c.jsonnet
new file mode 100644
index 000000000..d5e663b5b
--- /dev/null
+++ b/stac/fy3c.jsonnet
@@ -0,0 +1,61 @@
+local common = import 'common.jsonnet';
+
+{
+  name: 'FY-3C, MWHS',
+  filename: 'fy3c.json',
+  description: 'Data from the MWHS microwave radiometer onboard FY-3C, a Fengyun satellite. Data is available for three FY-3 satellites: FY-3A, FY-3B and FY-3C.',
+  title: 'FY-3C, MWHS',
+  unique_id: '12',
+  start_datetime: '2013-09-30T21:05:22',
+  end_datetime: '2018-12-31T19:58:58',
+  frequency: 'NA',
+  fixed_timesteps: 'False',
+  keywords: [
+    'atmosphere',
+    'observation',
+    'polar-orbiter',
+    'satellite',
+  ],
+  providers: [
+    common.providers.ecmwf_host,
+    common.providers.cma,
+    common.providers.eumetsat_processor,
+  ],
+  processing_level: '1C',
+
+  variables: {
+    names: [
+      'quality_pixel_bitmask',
+      'instrtemp',
+      'scnlin',
+      'satellite_azimuth_angle',
+      'satellite_zenith_angle',
+      'solar_azimuth_angle',
+      'solar_zenith_angle',
+      'data_quality_bitmask',
+      'quality_scanline_bitmask',
+      'time',
+      'warmnedt',
+      'coldnedt',
+      'btemps',
+      'u_independent_btemps',
+      'u_structured_btemps',
+      'u_common_btemps',
+      'quality_issue_pixel_bitmask',
+    ],
+  },
+
+  geometry: [-180, 180, -90, 90],
+
+  dataset: {
+    dataset_name: 'MICROWAVE_FCDR_V1.1-20200512/FY3C/*/*.nc',
+    type: 'application/vnd+netcdf',
+    description: 'Observation dataset',
+    locations: [common.hpc.hpc2020],
+    size: '1.51 TB',
+    inodes: '27805',
+    roles: ['data'],
+  },
+}
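The unique_id fields of these entries ('10'-'12' here, '14'-'16' for the IFS-FESOM files below) feed the duplicate check in functions.libsonnet. A sketch of how a top-level catalogue might wire several entries together; the aggregating expression and the href prefix are hypothetical and not part of this patch:

    local functions = import 'functions.libsonnet';

    local entries = [
      import 'fy3a.jsonnet',  // unique_id '10'
      import 'fy3b.jsonnet',  // unique_id '11'
      import 'fy3c.jsonnet',  // unique_id '12'
    ];

    // check_unique_ids evaluates to {} when all ids are distinct, so it can
    // be merged away; a collision aborts with 'Duplicate in unique IDs: ...'.
    functions.check_unique_ids(entries) + {
      links: [
        functions.dataset_entry_catalogue(e, 'https://example.org/stac/')
        for e in entries
      ],
    }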
diff --git a/stac/ifs_fesom_atmos.jsonnet b/stac/ifs_fesom_atmos.jsonnet
new file mode 100644
index 000000000..09fdee761
--- /dev/null
+++ b/stac/ifs_fesom_atmos.jsonnet
@@ -0,0 +1,1211 @@
+local common = import 'common.jsonnet';
+
+{
+ name: "AWI IFS-FESOM (ATMOS)",
+ filename: "ifs-fesom_atmos.json",
+ description: "The atmospheric component of the AWI IFS-FESOM coupled climate dataset, which integrates atmospheric simulations from the IFS model with ocean and sea ice dynamics from the FESOM framework, capturing detailed Earth system interactions at high resolution.",
+ title: "AWI IFS-FESOM Coupled Climate Model (Atmos)",
+ unique_id: "14",
+ start_datetime: "2000-01-01T00:00:00",
+ end_datetime: "2209-12-31T23:59:59",
+ frequency: "6h",
+ fixed_timesteps: "True",
+ keywords: [
+ "coupled model",
+ "climate simulation",
+ "atmosphere",
+ ],
+ providers: [
+ common.providers.awi,
+ ],
+ processing_level: "model output",
+
+ // Retrieved from one zarr file:
+ //   ds['data'].attrs['columns'][2:], ds['data'].attrs['means'],
+ //   ds['data'].attrs['std']
+ // Tendencies were calculated using a (slightly modified) script:
+ //   https://gitlab.jsc.fz-juelich.de/esde/WeatherGenerator-private/-/blob/main/data/preprocessing/tendencies/compute_tendencies.py?ref_type=heads
+ // Min and max were also calculated separately.
+
+ variables: {
+ names: [
+ '10u',
+ '10v',
+ '2d',
+ '2t',
+ 'ci',
+ 'cp',
+ 'lsp',
+ 'msl',
+ 'q_100000',
+ 'q_92500',
+ 'q_85000',
+ 'q_70000',
+ 'q_60000',
+ 'q_50000',
+ 'q_40000',
+ 'q_30000',
+ 'q_25000',
+ 'q_20000',
+ 'q_15000',
+ 'q_10000',
+ 'q_7000',
+ 'q_5000',
+ 'q_3000',
+ 'q_2000',
+ 'q_1000',
+ 'q_500',
+ 'q_100',
+ 'r_100000',
+ 'r_92500',
+ 'r_85000',
+ 'r_70000',
+ 'r_60000',
+ 'r_50000',
+ 'r_40000',
+ 'r_30000',
+ 'r_25000',
+ 'r_20000',
+ 'r_15000',
+ 'r_10000',
+ 'r_7000',
+ 'r_5000',
+ 'r_3000',
+ 'r_2000',
+ 'r_1000',
+ 'r_500',
+ 'r_100',
+ 'sst',
+ 't_100000',
+ 't_92500',
+ 't_85000',
+ 't_70000',
+ 't_60000',
+ 't_50000',
+ 't_40000',
+ 't_30000',
+ 't_25000',
+ 't_20000',
+ 't_15000',
+ 't_10000',
+ 't_7000',
+ 't_5000',
+ 't_3000',
+ 't_2000',
+ 't_1000',
+ 't_500',
+ 't_100',
+ 'tcc',
+ 'tsr',
+ 'tsrc',
+ 'u_100000',
+ 'u_92500',
+ 'u_85000',
+ 'u_70000',
+ 'u_60000',
+ 'u_50000',
+ 'u_40000',
+ 'u_30000',
+ 'u_25000',
+ 'u_20000',
+ 'u_15000',
+ 'u_10000',
+ 'u_7000',
+ 'u_5000',
+ 'u_3000',
+ 'u_2000',
+ 'u_1000',
+ 'u_500',
+ 'u_100',
+ 'v_100000',
+ 'v_92500',
+ 'v_85000',
+ 'v_70000',
+ 'v_60000',
+ 'v_50000',
+ 'v_40000',
+ 'v_30000',
+ 'v_25000',
+ 'v_20000',
+ 'v_15000',
+ 'v_10000',
+ 'v_7000',
+ 'v_5000',
+ 'v_3000',
+ 'v_2000',
+ 'v_1000',
+ 'v_500',
+ 'v_100',
+ 'vo_100000',
+ 'vo_92500',
+ 'vo_85000',
+ 'vo_70000',
+ 'vo_60000',
+ 'vo_50000',
+ 'vo_40000',
+ 'vo_30000',
+ 'vo_25000',
+ 'vo_20000',
+ 'vo_15000',
+ 'vo_10000',
+ 'vo_7000',
+ 'vo_5000',
+ 'vo_3000',
+ 'vo_2000',
+ 'vo_1000',
+ 'vo_500',
+ 'vo_100',
+ 'w_100000',
+ 'w_92500',
+ 'w_85000',
+ 'w_70000',
+ 'w_60000',
+ 'w_50000',
+ 'w_40000',
+ 'w_30000',
+ 'w_25000',
+ 'w_20000',
+ 'w_15000',
+ 'w_10000',
+ 'w_7000',
+ 'w_5000',
+ 'w_3000',
+ 'w_2000',
+ 'w_1000',
+ 'w_500',
+ 'w_100',
+ 'z_100000',
+ 'z_92500',
+ 'z_85000',
+ 'z_70000',
+ 'z_60000',
+ 'z_50000',
+ 'z_40000',
+ 'z_30000',
+ 'z_25000',
+ 'z_20000',
+ 'z_15000',
+ 'z_10000',
+ 'z_7000',
+ 'z_5000',
+ 'z_3000',
+ 'z_2000',
+ 'z_1000',
+ 'z_500',
+ 'z_100',
+ ],
+ mins: [
+ -4.02148900e+01,
+ -4.07384830e+01,
+ 1.43933330e+02,
+ 1.88484830e+02,
+ -4.44849850e-01,
+ -1.54989050e-02,
+ -5.88887000e-03,
+ 8.93906300e+04,
+ -3.22573280e-03,
+ -3.17010560e-03,
+ -3.17010560e-03,
+ -3.17788700e-03,
+ -2.11937050e-03,
+ -1.03737760e-03,
+ -7.26723300e-04,
+ -2.39441130e-04,
+ -1.23358540e-04,
+ -3.92669350e-05,
+ -7.51106240e-06,
+ -3.24543040e-07,
+ -1.95659500e-07,
+ 9.48989440e-08,
+ 2.36657730e-07,
+ 2.53085860e-07,
+ 1.92082830e-06,
+ 2.04992700e-06,
+ 1.91870530e-06,
+ -5.32310300e+01,
+ -3.29931500e+01,
+ -3.58498900e+01,
+ -3.40566830e+01,
+ -3.12862510e+01,
+ -3.54163550e+01,
+ -3.17167820e+01,
+ -2.74727650e+01,
+ -2.65579720e+01,
+ -3.66659900e+01,
+ -4.27701260e+01,
+ -3.34065320e+01,
+ -1.13122150e+01,
+ -2.61382720e+00,
+ -1.57455650e+00,
+ -9.91673470e-01,
+ -1.47990660e+00,
+ -1.78257960e-01,
+ -7.67921000e-04,
+ 2.63053620e+02,
+ 2.09536730e+02,
+ 2.12616930e+02,
+ 2.11836230e+02,
+ 2.03575780e+02,
+ 1.99280870e+02,
+ 2.04766360e+02,
+ 2.05688570e+02,
+ 1.94882970e+02,
+
1.88246370e+02, + 1.81760210e+02, + 1.74207290e+02, + 1.75463120e+02, + 1.67005920e+02, + 1.73241740e+02, + 1.71840600e+02, + 1.72378720e+02, + 1.81506760e+02, + 1.87434970e+02, + 1.94984040e+02, + -3.27500270e+00, + -5.57693800e+05, + -3.90631450e+04, + -3.94975500e+01, + -5.83334730e+01, + -6.96147160e+01, + -6.27234650e+01, + -6.08474800e+01, + -6.76836900e+01, + -7.91645660e+01, + -8.33340760e+01, + -8.50818100e+01, + -7.88508150e+01, + -6.06589660e+01, + -6.69696960e+01, + -5.67155230e+01, + -7.01156300e+01, + -8.91884700e+01, + -1.08877350e+02, + -1.27536360e+02, + -1.57519150e+02, + -1.84137010e+02, + -4.14777300e+01, + -5.90212750e+01, + -6.33746260e+01, + -6.49989300e+01, + -6.17431100e+01, + -7.65300750e+01, + -8.99900700e+01, + -1.13108430e+02, + -1.17309960e+02, + -1.09698410e+02, + -8.94641650e+01, + -6.73095250e+01, + -6.57928300e+01, + -7.83822000e+01, + -1.05105446e+02, + -1.20964320e+02, + -1.40974900e+02, + -1.72735870e+02, + -1.90465200e+02, + -3.79133270e-04, + -5.66808160e-04, + -5.81978700e-04, + -4.50187280e-04, + -3.99311660e-04, + -4.06562060e-04, + -5.23902200e-04, + -6.87431140e-04, + -6.71046150e-04, + -5.41627900e-04, + -3.60609400e-04, + -3.13273800e-04, + -2.82384200e-04, + -2.37437240e-04, + -2.75412780e-04, + -3.20302200e-04, + -3.24124100e-04, + -5.61225700e-04, + -6.91428800e-04, + -2.37942170e+00, + -2.87856720e+00, + -4.08001200e+00, + -5.63640930e+00, + -4.95408060e+00, + -4.21110530e+00, + -3.39700400e+00, + -2.69725230e+00, + -2.75887230e+00, + -2.63052850e+00, + -2.02693700e+00, + -1.06793550e+00, + -4.59838600e-01, + -3.84084940e-01, + -2.60565730e-01, + -1.98394120e-01, + -1.10035870e-01, + -6.37829500e-02, + -1.30025540e-02, + -8.92859800e+03, + -2.71850000e+03, + 3.92144500e+03, + 1.81278670e+04, + 2.92689500e+04, + 4.12972000e+04, + 5.54680860e+04, + 7.35679450e+04, + 8.45062660e+04, + 9.75177660e+04, + 1.13948190e+05, + 1.36547810e+05, + 1.55837050e+05, + 1.73627420e+05, + 2.00521560e+05, + 2.22145620e+05, + 2.59841800e+05, + 3.00532900e+05, + 4.06875530e+05, + ], + maxs: [ + 3.87701000e+01, + 3.86095800e+01, + 3.13691300e+02, + 3.30634120e+02, + 1.46429900e+00, + 6.82864600e-02, + 7.28547300e-02, + 1.08770540e+05, + 3.47696320e-02, + 3.05923190e-02, + 3.05665140e-02, + 3.03759670e-02, + 2.89473120e-02, + 1.18480740e-02, + 6.07667960e-03, + 2.79953540e-03, + 1.35867430e-03, + 4.51644200e-04, + 9.35364900e-05, + 2.62675750e-05, + 1.74583670e-05, + 3.92490400e-06, + 4.11177330e-06, + 3.97689500e-06, + 4.02457770e-06, + 4.11734500e-06, + 4.45599200e-06, + 1.61423630e+02, + 1.67212300e+02, + 1.64811390e+02, + 1.68897020e+02, + 1.70215520e+02, + 1.62425060e+02, + 1.78092850e+02, + 1.98214720e+02, + 1.83619610e+02, + 1.79469920e+02, + 1.87680160e+02, + 1.91976010e+02, + 1.94511050e+02, + 1.79966860e+02, + 1.76469020e+02, + 1.69285460e+02, + 6.70791240e+01, + 1.32706570e+01, + 9.18085600e-01, + 3.17236900e+02, + 3.30433400e+02, + 3.24749540e+02, + 3.17690860e+02, + 3.05181760e+02, + 2.95518600e+02, + 2.84492580e+02, + 2.69761380e+02, + 2.56098880e+02, + 2.48778500e+02, + 2.44127030e+02, + 2.44592360e+02, + 2.55108640e+02, + 2.65050800e+02, + 2.65682900e+02, + 2.71344800e+02, + 2.77082950e+02, + 2.99078250e+02, + 3.21452550e+02, + 3.37559140e+02, + 4.30519000e+00, + 2.74875060e+07, + 2.61537080e+07, + 4.01554370e+01, + 5.90995900e+01, + 6.73807800e+01, + 6.75421400e+01, + 7.86689400e+01, + 9.56959840e+01, + 1.09809000e+02, + 1.26302930e+02, + 1.32633830e+02, + 1.36118790e+02, + 1.17522650e+02, + 9.77589500e+01, + 9.39231300e+01, + 1.00846890e+02, + 
1.21987144e+02, + 1.38812790e+02, + 1.68160260e+02, + 1.82090700e+02, + 2.23966380e+02, + 3.89724040e+01, + 6.18127700e+01, + 7.08706700e+01, + 6.79034650e+01, + 7.39835050e+01, + 8.13348540e+01, + 9.73133800e+01, + 1.08139755e+02, + 1.18327860e+02, + 1.16398740e+02, + 9.28671200e+01, + 6.80711300e+01, + 7.87281650e+01, + 8.89637700e+01, + 1.03928474e+02, + 1.15729485e+02, + 1.37592800e+02, + 1.78903520e+02, + 1.93993880e+02, + 4.15104730e-04, + 6.29437850e-04, + 6.81201750e-04, + 5.43022470e-04, + 4.33062260e-04, + 4.49977030e-04, + 5.66415660e-04, + 6.73182800e-04, + 6.14638500e-04, + 4.95686200e-04, + 3.86123720e-04, + 2.51133020e-04, + 2.43720120e-04, + 2.38488900e-04, + 2.90747120e-04, + 3.19464690e-04, + 3.64300130e-04, + 6.02230900e-04, + 6.87111300e-04, + 3.48762900e+00, + 3.79090790e+00, + 4.55823100e+00, + 5.74013600e+00, + 5.00736200e+00, + 4.33115300e+00, + 3.17674880e+00, + 2.38557650e+00, + 2.06455020e+00, + 1.71239860e+00, + 1.28027030e+00, + 5.77580000e-01, + 5.01659200e-01, + 4.12704940e-01, + 2.53399940e-01, + 1.75150350e-01, + 1.01819060e-01, + 6.21116530e-02, + 1.45820650e-02, + 6.05341600e+03, + 1.20969380e+04, + 1.85545040e+04, + 3.32764650e+04, + 4.52581200e+04, + 5.95030430e+04, + 7.63983360e+04, + 9.70703700e+04, + 1.09869080e+05, + 1.24831300e+05, + 1.43236560e+05, + 1.67507810e+05, + 1.88266880e+05, + 2.08595450e+05, + 2.41449140e+05, + 2.68846000e+05, + 3.16533530e+05, + 3.66629250e+05, + 4.96567780e+05, + ], + means: [ + 0.001512893126346171, + 0.16890795528888702, + 273.94232177734375, + 278.6968078613281, + 0.10867653042078018, + 0.0003167228715028614, + 0.00026623933808878064, + 100885.84375, + 0.0070860134437680244, + 0.0060128928162157536, + 0.0046473778784275055, + 0.002488265745341778, + 0.001613425207324326, + 0.0009162343922071159, + 0.00042273945291526616, + 0.00013691304775420576, + 6.126026710262522e-05, + 2.0305025827838108e-05, + 4.956342763762223e-06, + 2.1044027107564034e-06, + 2.030660652962979e-06, + 2.1257467324176105e-06, + 2.3476166006730637e-06, + 2.5137062493740814e-06, + 2.7587032036535675e-06, + 3.0263486223702785e-06, + 3.685030833366909e-06, + 78.0499496459961, + 77.30339050292969, + 69.01009368896484, + 55.07302474975586, + 52.18269348144531, + 51.63574981689453, + 53.947608947753906, + 55.49324417114258, + 50.21294021606445, + 37.627315521240234, + 26.590721130371094, + 25.17813491821289, + 13.596026420593262, + 6.380970478057861, + 4.337967395782471, + 2.6305429935455322, + 0.4181714355945587, + 0.04079168662428856, + 0.0005312269204296172, + 282.61029052734375, + 281.5940856933594, + 278.0523986816406, + 275.1765441894531, + 267.9559631347656, + 261.53656005859375, + 253.16494750976562, + 242.13131713867188, + 228.26901245117188, + 221.35006713867188, + 216.3387908935547, + 212.2685546875, + 207.4629364013672, + 207.837158203125, + 210.68716430664062, + 215.20797729492188, + 219.3460235595703, + 227.13165283203125, + 239.08250427246094, + 264.4266357421875, + 0.705430805683136, + 4194826.0, + 5060187.0, + 0.002040264429524541, + 0.6627169251441956, + 1.4681116342544556, + 3.4032742977142334, + 4.810956954956055, + 6.521771430969238, + 8.785501480102539, + 11.855074882507324, + 13.575922012329102, + 14.832322120666504, + 14.545326232910156, + 11.479822158813477, + 7.922692775726318, + 6.277455806732178, + 5.277339458465576, + 5.357693672180176, + 6.255331039428711, + 6.330896377563477, + 8.032340049743652, + 0.17362920939922333, + 0.1929241269826889, + 0.1304398775100708, + 0.012522663921117783, + -0.0255670715123415, + 
-0.01780513860285282, + -0.019304752349853516, + -0.0053521147929131985, + -0.007112377323210239, + -0.01476877462118864, + -0.03489441052079201, + 0.00326238083653152, + 0.013180352747440338, + 0.009064869023859501, + 0.005032650660723448, + 5.71998862142209e-06, + -0.005608535837382078, + -0.009624656289815903, + -0.09301469475030899, + 1.5382498474991735e-07, + 1.4541262771672336e-07, + 1.5975916767274612e-07, + 2.033883959029481e-07, + 3.9789906480791615e-08, + -7.56664277901109e-08, + -1.546955274989159e-07, + -2.3988690145415603e-07, + -3.0235239023568283e-07, + -4.103944775124546e-07, + -5.742086273130553e-07, + -7.968686190906737e-07, + -9.85370434136712e-07, + -1.1486886251077522e-06, + -1.3526175735023571e-06, + -1.4645942201241269e-06, + -1.5500266954404651e-06, + -1.530355007162143e-06, + -1.2818295545002911e-06, + 0.01239265501499176, + 0.011709229089319706, + 0.008623454719781876, + 0.0021713741589337587, + 0.0005025228601880372, + 0.0004386529908515513, + 0.00035662477603182197, + 0.00015658812480978668, + 4.922510925098322e-05, + 1.0659641702659428e-05, + 2.1207799363764934e-05, + -1.1339187039993703e-05, + -3.937945075449534e-05, + -5.332443834049627e-05, + -5.97997750446666e-05, + -5.700021574739367e-05, + -4.354259726824239e-05, + -2.7555350243346766e-05, + -3.5805519473797176e-06, + 687.061767578125, + 6976.41845703125, + 13724.701171875, + 28935.0, + 40675.6640625, + 54159.7578125, + 70032.015625, + 89447.765625, + 101204.390625, + 115204.234375, + 132898.375, + 157299.203125, + 178509.984375, + 198723.4375, + 229939.734375, + 255225.375, + 299615.15625, + 345898.21875, + 462808.75 + ], + stds: [ + 5.693164348602295, + 4.715293884277344, + 20.876657485961914, + 21.427907943725586, + 0.2908477485179901, + 0.001166125643067062, + 0.0008350107818841934, + 1470.149169921875, + 0.005955067463219166, + 0.005033303517848253, + 0.004143625032156706, + 0.0025368400383740664, + 0.0018132260302081704, + 0.0011611981317400932, + 0.000578120059799403, + 0.00019360071746632457, + 8.474419882986695e-05, + 2.611067975522019e-05, + 4.3518234633666e-06, + 4.341798387486051e-07, + 2.703858115182811e-07, + 2.6434832989252754e-07, + 2.999292689764843e-07, + 3.730714013272518e-07, + 4.2893992713288753e-07, + 4.395521386868495e-07, + 2.53000251859703e-07, + 18.818065643310547, + 21.75848388671875, + 26.31398582458496, + 30.729440689086914, + 31.940793991088867, + 33.10162353515625, + 33.79802322387695, + 33.446044921875, + 33.66539764404297, + 34.38759231567383, + 31.78432273864746, + 32.57829666137695, + 18.89381217956543, + 15.265466690063477, + 15.370347023010254, + 10.726360321044922, + 1.38625967502594, + 0.11761236190795898, + 0.0038774770218878984, + 11.575693130493164, + 17.003585815429688, + 15.770951271057129, + 15.198408126831055, + 14.41689395904541, + 13.351896286010742, + 13.280254364013672, + 13.275673866271973, + 11.87940502166748, + 9.744429588317871, + 7.34329891204834, + 8.409246444702148, + 12.376836776733398, + 11.767593383789062, + 10.002999305725098, + 9.841185569763184, + 10.396921157836914, + 11.142080307006836, + 11.874174118041992, + 10.358405113220215, + 0.3666086196899414, + 5783975.0, + 6760642.0, + 6.290798664093018, + 8.223560333251953, + 8.389314651489258, + 9.144004821777344, + 10.282099723815918, + 11.949355125427246, + 14.362478256225586, + 17.41698455810547, + 18.64910316467285, + 18.749900817871094, + 17.239391326904297, + 14.68474292755127, + 14.276371002197266, + 15.222061157226562, + 17.796215057373047, + 19.961837768554688, + 23.42776107788086, + 
27.1632137298584, + 33.98922348022461, + 5.238987922668457, + 6.426464080810547, + 6.180879592895508, + 6.625279426574707, + 7.4922566413879395, + 8.825697898864746, + 10.84925365447998, + 13.222649574279785, + 13.707006454467773, + 12.595921516418457, + 10.123950958251953, + 7.787810802459717, + 6.921725749969482, + 7.0326056480407715, + 7.985318660736084, + 8.970782279968262, + 10.609649658203125, + 12.076651573181152, + 14.136162757873535, + 2.44894308707444e-05, + 3.194465534761548e-05, + 3.0399089155253023e-05, + 2.875826248782687e-05, + 3.0602495826315135e-05, + 3.619291601353325e-05, + 4.58522881672252e-05, + 5.328184852260165e-05, + 4.987173088011332e-05, + 3.962892878917046e-05, + 2.8239079256309196e-05, + 1.9502518625813536e-05, + 1.6364561815862544e-05, + 1.6184549167519435e-05, + 1.802031329134479e-05, + 1.9893635908374563e-05, + 2.271947596454993e-05, + 2.5089355403906666e-05, + 2.9744123821728863e-05, + 0.0829530879855156, + 0.11522987484931946, + 0.1403646022081375, + 0.15029902756214142, + 0.15072277188301086, + 0.14864742755889893, + 0.13639713823795319, + 0.10578497499227524, + 0.08371785283088684, + 0.058755502104759216, + 0.03359216824173927, + 0.01541175041347742, + 0.009107847698032856, + 0.006449859589338303, + 0.004212618805468082, + 0.003107759403064847, + 0.0019001071341335773, + 0.001208994654007256, + 0.00037181892548687756, + 1191.3887939453125, + 1344.78076171875, + 1575.7747802734375, + 2217.778564453125, + 2764.9619140625, + 3423.122802734375, + 4243.91064453125, + 5258.80810546875, + 5789.27880859375, + 6189.51220703125, + 6272.06640625, + 5964.66552734375, + 5809.5302734375, + 6076.25, + 6968.5791015625, + 7879.7607421875, + 9563.99609375, + 11225.1708984375, + 14963.015625, + ], + tendencies: + { + means: [ + 6.79964427e-05, + 2.00832023e-04, + -2.89965966e-04, + 3.33103804e-03, + 1.57059227e-08, + 2.99882965e-08, + -3.58654125e-08, + 5.15319096e-02, + -2.59432597e-07, + -4.37241875e-08, + 2.15168976e-07, + 8.98990381e-08, + 3.63237697e-08, + -5.91876337e-09, + -1.30448688e-08, + -7.85433941e-09, + -3.24977639e-09, + -5.79034823e-10, + 1.38694921e-11, + 7.00009347e-13, + -4.82565289e-13, + -6.27918420e-14, + 1.24132648e-12, + -1.33723992e-12, + -1.02935451e-12, + -7.59766912e-12, + 5.99269246e-12, + -8.44668491e-03, + -4.60814362e-03, + 3.37127175e-05, + 7.11040675e-04, + 2.67785020e-04, + -1.80799157e-04, + -4.37428437e-04, + -7.11412489e-04, + -7.67201948e-04, + -4.34358689e-04, + -3.92553842e-04, + -1.89375224e-04, + -4.72313239e-04, + -1.44605523e-04, + -1.05533425e-04, + -1.74388986e-04, + -3.33110089e-05, + -3.20614278e-06, + 7.16443678e-08, + -1.89167222e-05, + 1.91373415e-03, + 1.34967154e-03, + 4.62075543e-04, + 7.78386166e-05, + 2.81235912e-05, + -1.20824439e-05, + -1.85385469e-05, + -1.94482510e-05, + -7.62664621e-06, + 8.28034211e-06, + 5.42778792e-05, + 4.52267553e-06, + -9.09262030e-06, + -7.81184068e-06, + -4.13458971e-05, + -3.03164032e-05, + 1.14100015e-04, + 1.02769040e-04, + 2.93115317e-04, + -3.31612049e-06, + -2.94902842e+02, + -8.44096444e+02, + 1.06989361e-04, + 4.90542672e-04, + 5.63787817e-04, + 4.14248651e-04, + 5.08349180e-04, + 4.80808219e-04, + 4.90686817e-04, + 4.43070342e-04, + 3.46852297e-04, + 2.66047783e-04, + 4.35259446e-04, + 5.88674348e-04, + 1.92673089e-04, + -3.16994077e-04, + 1.35748318e-04, + -8.48294724e-05, + 7.52275867e-05, + 2.51418506e-03, + -2.38567897e-03, + 2.54432717e-04, + 2.31232234e-04, + 1.07689091e-04, + -3.19112169e-06, + -3.92678123e-06, + -2.25287511e-05, + -1.27236665e-04, + -1.42684123e-04, + 
-1.29366356e-04, + -1.69981918e-04, + -1.12178783e-04, + -9.83858370e-05, + -1.67018831e-04, + -4.31042361e-04, + -9.01136296e-04, + -1.01117876e-03, + 1.57996531e-04, + 8.76248798e-04, + 4.39599747e-03, + -7.28306839e-11, + -4.28992138e-11, + -6.61685489e-11, + -4.73201133e-11, + -5.90173978e-11, + -2.57039550e-11, + -1.21468713e-12, + 5.48073311e-11, + 9.35095067e-11, + 7.71902233e-11, + 8.13906017e-11, + 9.67772051e-11, + 9.92297533e-11, + 8.02346566e-11, + -3.89020327e-11, + -9.57784699e-11, + -2.96807875e-10, + -8.11915024e-11, + -5.20958754e-10, + -6.44218314e-05, + -5.56636012e-05, + -4.80754978e-05, + -3.90076089e-05, + -3.44582528e-05, + -2.98138033e-05, + -2.50538721e-05, + -2.01402961e-05, + -1.76076495e-05, + -1.49198916e-05, + -1.19198557e-05, + -8.62978716e-06, + -6.44516628e-06, + -4.87376383e-06, + -3.30867047e-06, + -2.44950064e-06, + -1.55614034e-06, + -1.07571520e-06, + -3.27599074e-07, + 4.02377246e-02, + 6.73737427e-02, + 8.56625574e-02, + 9.48633289e-02, + 9.65141793e-02, + 9.68599758e-02, + 9.57841449e-02, + 9.39492898e-02, + 9.32280842e-02, + 9.29518861e-02, + 9.61682425e-02, + 1.00855414e-01, + 9.87161637e-02, + 9.87791016e-02, + 9.47724919e-02, + 9.04350260e-02, + 9.44071193e-02, + 1.24999706e-01, + 1.58455038e-01, + ], + stds: [ + 2.04880912e+00, + 2.27721528e+00, + 1.77107824e+00, + 2.61922021e+00, + 3.06422742e-03, + 1.18098068e-03, + 6.98917234e-04, + 3.32806756e+02, + 7.52833137e-04, + 8.03809095e-04, + 9.29509305e-04, + 7.37105888e-04, + 5.44784952e-04, + 3.69135594e-04, + 1.94228759e-04, + 7.15903528e-05, + 3.26509221e-05, + 9.59570549e-06, + 1.43588680e-06, + 1.30395224e-07, + 3.97129138e-08, + 3.23368676e-08, + 3.60392892e-08, + 3.58263537e-08, + 4.10098487e-08, + 4.85546331e-08, + 8.35013918e-08, + 9.65433558e+00, + 9.79734556e+00, + 1.27996748e+01, + 1.62510613e+01, + 1.77176746e+01, + 1.89854789e+01, + 1.97870851e+01, + 1.93370040e+01, + 1.82048264e+01, + 1.49693382e+01, + 9.36898569e+00, + 7.21208312e+00, + 4.62783657e+00, + 1.29973296e+00, + 7.66014233e-01, + 4.91520191e-01, + 1.15702544e-01, + 1.59410286e-02, + 3.08223992e-04, + 7.12956368e-02, + 1.78357112e+00, + 1.64942268e+00, + 1.47626264e+00, + 1.25219270e+00, + 1.26096422e+00, + 1.31440936e+00, + 1.28533681e+00, + 1.07941062e+00, + 1.26509566e+00, + 1.49683856e+00, + 1.09288676e+00, + 8.88012478e-01, + 1.07523545e+00, + 1.05611646e+00, + 1.13776416e+00, + 1.26553555e+00, + 1.63660967e+00, + 2.32605101e+00, + 3.72749865e+00, + 2.59542483e-01, + 7.47788012e+06, + 8.75582689e+06, + 2.20974828e+00, + 2.92713070e+00, + 2.75808830e+00, + 2.62225975e+00, + 2.85457785e+00, + 3.41474418e+00, + 4.35689950e+00, + 5.25466670e+00, + 5.07013361e+00, + 4.09518676e+00, + 2.96180227e+00, + 2.39545737e+00, + 2.33201832e+00, + 2.38344240e+00, + 2.60270437e+00, + 2.89723272e+00, + 3.63666169e+00, + 4.88743821e+00, + 8.35326981e+00, + 2.48941975e+00, + 3.36883723e+00, + 3.14060359e+00, + 3.05781795e+00, + 3.43862243e+00, + 4.19446455e+00, + 5.36130891e+00, + 6.44963589e+00, + 6.23163634e+00, + 4.99100513e+00, + 3.48663487e+00, + 2.73897934e+00, + 2.63252540e+00, + 2.66594609e+00, + 2.87948845e+00, + 3.20231178e+00, + 3.97109376e+00, + 5.40261631e+00, + 9.32239190e+00, + 1.78130313e-05, + 2.52892026e-05, + 2.30487699e-05, + 2.09571429e-05, + 2.32513672e-05, + 2.91649951e-05, + 3.84187209e-05, + 4.32325610e-05, + 3.69251126e-05, + 2.45161046e-05, + 1.43930166e-05, + 9.00017100e-06, + 7.08884424e-06, + 6.04360853e-06, + 5.62536891e-06, + 5.92017734e-06, + 7.09696754e-06, + 9.40389091e-06, + 1.60120691e-05, + 
5.08040870e-02,
+ 9.39932779e-02,
+ 1.30344412e-01,
+ 1.51335815e-01,
+ 1.54034956e-01,
+ 1.49593356e-01,
+ 1.32945777e-01,
+ 9.61623151e-02,
+ 7.31659866e-02,
+ 5.24249339e-02,
+ 3.23197158e-02,
+ 1.57288627e-02,
+ 1.00414238e-02,
+ 7.56568069e-03,
+ 5.20657662e-03,
+ 3.97055566e-03,
+ 2.53266011e-03,
+ 1.67653088e-03,
+ 5.67623676e-04,
+ 2.71020850e+02,
+ 2.64577039e+02,
+ 2.58957550e+02,
+ 2.58322889e+02,
+ 2.69489864e+02,
+ 2.93978264e+02,
+ 3.34186930e+02,
+ 3.73786022e+02,
+ 3.70353569e+02,
+ 3.34293442e+02,
+ 2.91120499e+02,
+ 2.73959793e+02,
+ 2.87784370e+02,
+ 3.18688906e+02,
+ 3.79932830e+02,
+ 4.38908117e+02,
+ 5.64231399e+02,
+ 7.47186290e+02,
+ 1.49751993e+03,
+ ],
+ },
+ },
+
+ geometry: [0, 360, -90, 90],
+
+ dataset: {
+ // This contains a set of files for the years 2000-2209, one file per year,
+ // e.g. atmos_all/2000.zarr
+ dataset_name: 'atmos_all/{year}.zarr',
+ type: 'application/zarr',
+ description: 'Atmospheric component of the AWI model using OpenIFS (CY43R3 version). Configured on a regular grid of 400 (longitude) × 192 (latitude) points.',
+ locations: [common.hpc.jsc],
+ size: '12 TB',
+ inodes: '614,657',
+ roles: ['data'],
+ },
+}
diff --git a/stac/ifs_fesom_ocean_elem.jsonnet b/stac/ifs_fesom_ocean_elem.jsonnet
new file mode 100644
index 000000000..7d88ec584
--- /dev/null
+++ b/stac/ifs_fesom_ocean_elem.jsonnet
@@ -0,0 +1,735 @@
+local common = import 'common.jsonnet';
+
+{
+ "name": "AWI IFS-FESOM (OCEAN ELEM)",
+ "filename": "ifs-fesom_ocean_elem.json",
+ "description": "The ocean element component of the AWI IFS-FESOM coupled climate dataset, which integrates atmospheric simulations from the IFS model with ocean and sea ice dynamics from the FESOM framework, capturing detailed Earth system interactions at high resolution.",
+ "title": "AWI IFS-FESOM Coupled Climate Model (Ocean element)",
+ "unique_id": "15",
+ "start_datetime": "2000-01-01T00:00:00",
+ "end_datetime": "2209-12-31T23:59:59",
+ "frequency": "1 day",
+ "fixed_timesteps": "True",
+ "keywords": [
+ "coupled model",
+ "climate simulation",
+ "ocean",
+ "ocean element",
+ ],
+ "providers": [
+ common.providers.awi,
+ ],
+ "processing_level": "model output",
+
+ // Retrieved from one zarr file:
+ //   ds['data'].attrs['columns'][2:], ds['data'].attrs['means'],
+ //   ds['data'].attrs['std']
+ // Tendencies were calculated using a (slightly modified) script:
+ //   https://gitlab.jsc.fz-juelich.de/esde/WeatherGenerator-private/-/blob/main/data/preprocessing/tendencies/compute_tendencies.py?ref_type=heads
+ // Min and max were also calculated separately.
+
+ variables: {
+ names: [
+ 'ty_sur',
+ 'u_2',
+ 'u_7',
+ 'u_15',
+ 'u_25',
+ 'u_35',
+ 'u_45',
+ 'u_55',
+ 'u_65',
+ 'u_75',
+ 'u_85',
+ 'u_95',
+ 'u_107',
+ 'u_125',
+ 'u_147',
+ 'u_175',
+ 'u_210',
+ 'u_255',
+ 'u_310',
+ 'u_375',
+ 'u_450',
+ 'u_535',
+ 'u_630',
+ 'u_735',
+ 'u_850',
+ 'u_975',
+ 'u_1110',
+ 'u_1255',
+ 'u_1415',
+ 'u_1600',
+ 'u_1810',
+ 'u_2035',
+ 'u_2275',
+ 'u_2525',
+ 'u_2775',
+ 'u_3025',
+ 'u_3275',
+ 'u_3525',
+ 'u_3775',
+ 'u_4025',
+ 'u_4275',
+ 'u_4525',
+ 'u_4775',
+ 'u_5025',
+ 'u_5275',
+ 'u_5525',
+ 'u_5825',
+ 'u_6125',
+ 'tx_sur',
+ 'v_2',
+ 'v_7',
+ 'v_15',
+ 'v_25',
+ 'v_35',
+ 'v_45',
+ 'v_55',
+ 'v_65',
+ 'v_75',
+ 'v_85',
+ 'v_95',
+ 'v_107',
+ 'v_125',
+ 'v_147',
+ 'v_175',
+ 'v_210',
+ 'v_255',
+ 'v_310',
+ 'v_375',
+ 'v_450',
+ 'v_535',
+ 'v_630',
+ 'v_735',
+ 'v_850',
+ 'v_975',
+ 'v_1110',
+ 'v_1255',
+ 'v_1415',
+ 'v_1600',
+ 'v_1810',
+ 'v_2035',
+ 'v_2275',
+ 'v_2525',
+ 'v_2775',
+ 'v_3025',
+ 'v_3275',
+ 'v_3525',
+ 'v_3775', + 'v_4025', + 'v_4275', + 'v_4525', + 'v_4775', + 'v_5025', + 'v_5275', + 'v_5525', + 'v_5825', + 'v_6125', + ], + mins: [ + -4.05822800e+00, + -2.90257170e+00, + -2.76596800e+00, + -2.58443070e+00, + -2.49516130e+00, + -2.40667960e+00, + -2.27296070e+00, + -2.21714020e+00, + -2.21634100e+00, + -2.08386900e+00, + -1.92749400e+00, + -1.81304570e+00, + -1.83064540e+00, + -1.80692230e+00, + -1.74321130e+00, + -1.67289330e+00, + -1.66162040e+00, + -1.52693580e+00, + -1.48514160e+00, + -1.39857340e+00, + -1.09145900e+00, + -1.08259360e+00, + -1.09084200e+00, + -9.96496140e-01, + -8.29466040e-01, + -8.21406500e-01, + -9.57396600e-01, + -9.66500160e-01, + -9.26159560e-01, + -8.95926400e-01, + -1.06642560e+00, + -1.50195290e+00, + -7.71802070e-01, + -6.84844700e-01, + -7.69757500e-01, + -7.65094640e-01, + -7.93736000e-01, + -9.31236270e-01, + -9.15882470e-01, + -8.19114570e-01, + -7.65560570e-01, + -9.03668400e-01, + -6.45355200e-01, + -3.39742500e-01, + -2.24562390e-01, + -2.25960050e-01, + -1.86184560e-01, + 0.00000000e+00, + -4.28349300e+00, + -3.09444140e+00, + -2.98502920e+00, + -2.62424680e+00, + -2.52409530e+00, + -2.34698820e+00, + -2.27336800e+00, + -2.20554600e+00, + -2.11837530e+00, + -2.02173200e+00, + -1.89746570e+00, + -1.85762730e+00, + -1.83168360e+00, + -1.64427110e+00, + -1.65935930e+00, + -1.71675430e+00, + -1.58125570e+00, + -1.34577740e+00, + -1.17440650e+00, + -9.62043200e-01, + -9.97251200e-01, + -1.14961980e+00, + -1.14471940e+00, + -1.08048180e+00, + -8.65731600e-01, + -8.45774530e-01, + -8.47901100e-01, + -7.95656200e-01, + -8.78375500e-01, + -7.51347700e-01, + -8.81861200e-01, + -7.09255340e-01, + -7.26453900e-01, + -7.13301400e-01, + -7.27918500e-01, + -6.86222430e-01, + -7.46361140e-01, + -5.87532500e-01, + -4.90685200e-01, + -5.51239200e-01, + -4.82220260e-01, + -3.97435780e-01, + -3.62835400e-01, + -2.80702770e-01, + -2.46475220e-01, + -2.24333200e-01, + -2.16253860e-01, + 0.00000000e+00, + ], + maxs: [ + 3.20660000e+00, + 3.70025540e+00, + 3.61009200e+00, + 3.39294980e+00, + 3.25449040e+00, + 3.05589650e+00, + 2.95910170e+00, + 2.73777750e+00, + 2.56960150e+00, + 2.39552190e+00, + 2.30904840e+00, + 2.10527300e+00, + 2.04448560e+00, + 1.75900840e+00, + 1.67325720e+00, + 1.60089490e+00, + 1.38911680e+00, + 1.32687000e+00, + 1.30533200e+00, + 1.15056250e+00, + 1.29423180e+00, + 1.11452130e+00, + 1.05505920e+00, + 9.25778030e-01, + 8.61339700e-01, + 8.20470000e-01, + 8.10592200e-01, + 7.65759000e-01, + 8.18788300e-01, + 7.59725300e-01, + 7.00529700e-01, + 6.82193300e-01, + 6.46058440e-01, + 6.40169500e-01, + 6.77663200e-01, + 6.38877200e-01, + 6.37415400e-01, + 6.37130260e-01, + 6.31017800e-01, + 6.31361200e-01, + 4.24794880e-01, + 5.70674660e-01, + 4.67337940e-01, + 3.52707600e-01, + 2.29207680e-01, + 1.86956350e-01, + 1.69581440e-01, + 0.00000000e+00, + 3.80459740e+00, + 2.79329000e+00, + 2.60421350e+00, + 2.42773300e+00, + 2.33881400e+00, + 2.23501400e+00, + 2.20942970e+00, + 2.14899300e+00, + 2.06166820e+00, + 1.97586670e+00, + 1.86088630e+00, + 1.79607090e+00, + 1.83760490e+00, + 1.90140460e+00, + 1.69322760e+00, + 1.53950400e+00, + 1.35182030e+00, + 1.21304250e+00, + 1.11507170e+00, + 1.13060930e+00, + 8.81018900e-01, + 9.62841450e-01, + 9.28627500e-01, + 8.70205760e-01, + 7.95059400e-01, + 7.56254300e-01, + 8.21297170e-01, + 7.63306500e-01, + 8.17793130e-01, + 9.28493900e-01, + 9.35087200e-01, + 8.42833200e-01, + 8.31027000e-01, + 1.01112280e+00, + 8.18302630e-01, + 6.83009000e-01, + 5.92106200e-01, + 6.57581600e-01, + 7.25216800e-01, + 
8.03578260e-01, + 9.39945800e-01, + 1.00461240e+00, + 7.16737750e-01, + 2.92733370e-01, + 2.26891980e-01, + 2.04246550e-01, + 1.94158670e-01, + 0.00000000e+00, + ], + means: [ + 0.0014986582100391388, + -0.020640794187784195, + -0.0194745771586895, + -0.015702081844210625, + -0.009677456691861153, + -0.00455519137904048, + -0.0008144613821059465, + 0.002094091149047017, + 0.0044489167630672455, + 0.006495979614555836, + 0.008342064917087555, + 0.010011048056185246, + 0.011934586800634861, + 0.014339592307806015, + 0.01649792119860649, + 0.017170889303088188, + 0.01494122575968504, + 0.011007098481059074, + 0.007130878046154976, + 0.0038376031443476677, + 0.0026369555853307247, + 0.003455997444689274, + 0.0034204740077257156, + 0.002448122017085552, + 0.002080702455714345, + 0.0018841188866645098, + 0.0016205976717174053, + 0.0016530966386198997, + 0.0016257022507488728, + 0.0014280588366091251, + 0.0012494398979470134, + 0.0011284278007224202, + 0.0009969003731384873, + 0.0009723420953378081, + 0.0008234934066422284, + 0.0006053713732399046, + 0.00034477506414987147, + 5.0320195441599935e-05, + -3.845872561214492e-05, + 6.39344216324389e-05, + 2.4087945348583162e-05, + -5.393216270022094e-06, + 1.4672230463474989e-05, + 5.7334632401762065e-06, + 5.183766916161403e-06, + -1.934892679855693e-06, + -9.578817298461217e-06, + 0.0, + 0.012413257732987404, + -0.0007197542581707239, + -0.0015933234244585037, + -0.002354219788685441, + -0.002695431001484394, + -0.002416547853499651, + -0.001512316637672484, + -0.0006604160880669951, + 6.31799193797633e-05, + 0.0006665881955996156, + 0.0012129677925258875, + 0.001664375071413815, + 0.0021404412109404802, + 0.0026531077455729246, + 0.0031408765353262424, + 0.003336072899401188, + 0.0029038796201348305, + 0.0021690507419407368, + 0.0014845682308077812, + 0.0009348196326754987, + 0.0007495750905945897, + 0.000763435906264931, + 0.0005760613130405545, + 0.0002760784700512886, + 0.00011936565715586767, + -5.2408768169698305e-06, + -0.0001337933208560571, + -0.00019301949942018837, + -0.0001688509073574096, + -0.00022771942894905806, + -0.00020466132264118642, + -0.00014178291894495487, + -8.728873945074156e-05, + -6.11957730143331e-05, + -3.28432179230731e-05, + -1.8775799617287703e-05, + -3.6100653233006597e-05, + -4.80596354464069e-05, + -1.0421264960314147e-05, + 4.1858340409817174e-05, + 6.193675653776154e-05, + 7.973912579473108e-05, + 6.645732355536893e-05, + 5.8640387578634545e-05, + 4.246804746799171e-05, + 2.3591643184772693e-05, + 1.9365375010238495e-06, + 0.0, + ], + stds: [ + 0.1149645745754242, + 0.2020045965909958, + 0.1934438794851303, + 0.17691950500011444, + 0.15911765396595, + 0.1447390466928482, + 0.13438086211681366, + 0.12675102055072784, + 0.12076697498559952, + 0.11574187129735947, + 0.11137962341308594, + 0.10760815441608429, + 0.10360421985387802, + 0.10032349824905396, + 0.10111156851053238, + 0.10410355776548386, + 0.0967150405049324, + 0.07944530993700027, + 0.06527426838874817, + 0.0580405555665493, + 0.053686633706092834, + 0.04974823072552681, + 0.04610694199800491, + 0.04293695092201233, + 0.039821598678827286, + 0.03689362108707428, + 0.0342414453625679, + 0.03183373436331749, + 0.029296832159161568, + 0.02722606249153614, + 0.02555002272129059, + 0.023759884759783745, + 0.022116774693131447, + 0.020654674619436264, + 0.019162526354193687, + 0.017585473135113716, + 0.016074825078248978, + 0.015048585832118988, + 0.013628973625600338, + 0.011980430223047733, + 0.010402435436844826, + 0.008546233177185059, + 
0.007009623106569052, + 0.005539731588214636, + 0.003948947414755821, + 0.002579973079264164, + 0.0013529193820431828, + 0.0, + 0.13546857237815857, + 0.1263405829668045, + 0.12241507321596146, + 0.11449543386697769, + 0.10603608936071396, + 0.09887248277664185, + 0.09387128800153732, + 0.09050410240888596, + 0.08805426210165024, + 0.08597707748413086, + 0.08399682492017746, + 0.08197164535522461, + 0.07916373759508133, + 0.07502514868974686, + 0.06987597048282623, + 0.06497101485729218, + 0.05999073386192322, + 0.05551240220665932, + 0.051570791751146317, + 0.04801889508962631, + 0.04447999596595764, + 0.04112628102302551, + 0.03808588162064552, + 0.03535434603691101, + 0.03266824409365654, + 0.030315089970827103, + 0.02823241427540779, + 0.02645212784409523, + 0.02458120509982109, + 0.02293495461344719, + 0.021731702610850334, + 0.020434178411960602, + 0.019267592579126358, + 0.018275270238518715, + 0.017277603968977928, + 0.016195766627788544, + 0.01515091210603714, + 0.014132826589047909, + 0.012835639528930187, + 0.011547588743269444, + 0.010262034833431244, + 0.008475004695355892, + 0.00687987357378006, + 0.005459477659314871, + 0.003965720999985933, + 0.0026946677826344967, + 0.0013288235059008002, + 0.0, + ], + tendencies: + { + means: [ + 7.91411538e-07, + -2.22174631e-07, + -3.95309572e-07, + -5.58231640e-07, + -5.29399658e-07, + -1.80900954e-07, + 6.07158492e-08, + 1.89395310e-07, + 2.46425250e-07, + 3.04049925e-07, + 3.66471786e-07, + 4.37614781e-07, + 4.89793860e-07, + 4.55089255e-07, + 3.55827392e-07, + 2.18942242e-07, + 1.28819354e-07, + 1.36181718e-07, + 1.49456377e-07, + 1.33374894e-07, + 1.38986093e-07, + 1.48466180e-07, + 1.15512194e-07, + 6.18027191e-08, + 1.89748096e-08, + -7.65539282e-09, + -2.11507306e-08, + -2.00954922e-08, + -2.32716941e-08, + -1.48637388e-08, + 4.92298976e-09, + 1.12446451e-08, + 9.76077443e-09, + 1.98523881e-08, + 3.18869183e-08, + 4.44791804e-08, + 4.79168874e-08, + 4.15066350e-08, + 3.33290903e-08, + 1.55577262e-08, + 6.17315457e-09, + 3.33150450e-09, + 1.93249978e-09, + -2.99596124e-10, + 9.05836315e-10, + 1.97991402e-09, + 4.15476784e-10, + 0.00000000e+00, + 1.29320414e-06, + -1.00985291e-06, + -8.49006449e-07, + -2.27963241e-07, + -2.15899913e-10, + -1.26082841e-07, + -1.88453193e-07, + -1.80041054e-07, + -1.34752838e-07, + -9.33605863e-08, + -6.74649017e-08, + -4.63567806e-08, + -2.93362535e-08, + 1.78430790e-08, + 7.18177278e-08, + 5.87767053e-08, + 3.57432778e-08, + 1.11473991e-08, + 8.82355423e-09, + -1.06422240e-08, + -8.80557671e-09, + -1.89707834e-08, + -1.77022048e-08, + -8.48856017e-09, + -1.37216275e-08, + -1.39822176e-08, + -6.31974961e-09, + -4.23388102e-09, + 5.94703597e-09, + 1.53592836e-08, + 1.39732030e-08, + 1.18299139e-08, + 1.26023282e-08, + 8.01748906e-09, + -7.28847027e-10, + -8.56890988e-09, + -1.94933980e-08, + -1.61988606e-08, + -1.62305739e-08, + -1.57105212e-08, + -1.79976993e-08, + -1.28000349e-08, + -6.06864710e-09, + -3.29618524e-09, + 2.18251173e-09, + 7.26256443e-10, + 9.08099007e-11, + 0.00000000e+00, + ], + stds: [ + 1.09235074e-01, + 7.14914342e-02, + 6.60012358e-02, + 5.61012582e-02, + 4.65472060e-02, + 3.85706074e-02, + 3.35850185e-02, + 3.02729178e-02, + 2.80759542e-02, + 2.64882479e-02, + 2.52371709e-02, + 2.41900152e-02, + 2.27174268e-02, + 2.10596797e-02, + 1.91869021e-02, + 1.71669204e-02, + 1.53602377e-02, + 1.38918866e-02, + 1.26939512e-02, + 1.17599330e-02, + 1.10752910e-02, + 1.05806538e-02, + 1.02048040e-02, + 9.88796627e-03, + 9.57228229e-03, + 9.22610448e-03, + 8.84745334e-03, + 8.47594907e-03, 
+ 8.03448079e-03,
+ 7.55800219e-03,
+ 7.16947278e-03,
+ 6.78201546e-03,
+ 6.41038813e-03,
+ 6.11132820e-03,
+ 5.81038753e-03,
+ 5.42730554e-03,
+ 5.00390013e-03,
+ 4.59390628e-03,
+ 4.17790130e-03,
+ 3.72892167e-03,
+ 3.27947488e-03,
+ 2.68132106e-03,
+ 2.14863148e-03,
+ 1.67806686e-03,
+ 1.19492225e-03,
+ 7.84685617e-04,
+ 3.81541797e-04,
+ 0.00000000e+00,
+ 1.12256086e-01,
+ 7.25022089e-02,
+ 6.69892953e-02,
+ 5.73759157e-02,
+ 4.79116882e-02,
+ 4.00214847e-02,
+ 3.50973129e-02,
+ 3.19725881e-02,
+ 2.99248040e-02,
+ 2.84039678e-02,
+ 2.71423902e-02,
+ 2.59867648e-02,
+ 2.45467197e-02,
+ 2.27186634e-02,
+ 2.05752903e-02,
+ 1.85060460e-02,
+ 1.65734354e-02,
+ 1.51242241e-02,
+ 1.40793494e-02,
+ 1.33096091e-02,
+ 1.26388324e-02,
+ 1.20953951e-02,
+ 1.16861764e-02,
+ 1.13517762e-02,
+ 1.09852609e-02,
+ 1.05396465e-02,
+ 1.00409063e-02,
+ 9.52547398e-03,
+ 8.93073792e-03,
+ 8.32486094e-03,
+ 7.83003653e-03,
+ 7.34893577e-03,
+ 6.92279823e-03,
+ 6.63333667e-03,
+ 6.35052677e-03,
+ 6.00918737e-03,
+ 5.65405392e-03,
+ 5.27694165e-03,
+ 4.89139323e-03,
+ 4.42795779e-03,
+ 3.93978663e-03,
+ 3.17515243e-03,
+ 2.50305260e-03,
+ 1.96745223e-03,
+ 1.39216114e-03,
+ 8.85007257e-04,
+ 3.86312422e-04,
+ 0.00000000e+00,
+ ],
+ },
+ },
+
+ geometry: [-180, 180, -90, 90],
+
+ dataset: {
+ // This contains a set of files for the years 2000-2209, one file per year,
+ // e.g. ocean_elem/ocean_elem_2000.zarr
+ dataset_name: 'ocean_elem/ocean_elem_{year}.zarr',
+ type: 'application/zarr',
+ description: 'Ocean element component of the AWI coupled model, produced by the FESOM ocean and sea ice framework.',
+ locations: [common.hpc.jsc],
+ size: '4.4 TB',
+ inodes: '154,452',
+ roles: ['data'],
+ },
+}
diff --git a/stac/ifs_fesom_ocean_node.jsonnet b/stac/ifs_fesom_ocean_node.jsonnet
new file mode 100644
index 000000000..1e97c6aee
--- /dev/null
+++ b/stac/ifs_fesom_ocean_node.jsonnet
@@ -0,0 +1,1141 @@
+local common = import 'common.jsonnet';
+
+{
+ "name": "AWI IFS-FESOM (OCEAN NODE)",
+ "filename": "ifs-fesom_ocean_node.json",
+ "description": "The ocean node component of the AWI IFS-FESOM coupled climate dataset, which integrates atmospheric simulations from the IFS model with ocean and sea ice dynamics from the FESOM framework, capturing detailed Earth system interactions at high resolution.",
+ "title": "AWI IFS-FESOM Coupled Climate Model (Ocean node)",
+ "unique_id": "16",
+ "start_datetime": "2000-01-01T00:00:00",
+ "end_datetime": "2209-12-31T23:59:59",
+ "frequency": "1 day",
+ "fixed_timesteps": "True",
+ "keywords": [
+ "coupled model",
+ "climate simulation",
+ "ocean",
+ "ocean node",
+ ],
+ "providers": [
+ common.providers.awi,
+ ],
+ "processing_level": "model output",
+
+ // Retrieved from one zarr file:
+ //   ds['data'].attrs['columns'][2:], ds['data'].attrs['means'],
+ //   ds['data'].attrs['std']
+ // Tendencies were calculated using a (slightly modified) script:
+ //   https://gitlab.jsc.fz-juelich.de/esde/WeatherGenerator-private/-/blob/main/data/preprocessing/tendencies/compute_tendencies.py?ref_type=heads
+ // Min and max were also calculated separately.
+
+ variables: {
+ names: [
+ 'sss',
+ 'temp_2',
+ 'temp_7',
+ 'temp_15',
+ 'temp_25',
+ 'temp_35',
+ 'temp_45',
+ 'temp_55',
+ 'temp_65',
+ 'temp_75',
+ 'temp_85',
+ 'temp_95',
+ 'temp_107',
+ 'temp_125',
+ 'temp_147',
+ 'temp_175',
+ 'temp_210',
+ 'temp_255',
+ 'temp_310',
+ 'temp_375',
+ 'temp_450',
+ 'temp_535',
+ 'temp_630',
+ 'temp_735',
+ 'temp_850',
+ 'temp_975',
+ 'temp_1110',
+ 'temp_1255',
+ 'temp_1415',
+
'temp_1600', + 'temp_1810', + 'temp_2035', + 'temp_2275', + 'temp_2525', + 'temp_2775', + 'temp_3025', + 'temp_3275', + 'temp_3525', + 'temp_3775', + 'temp_4025', + 'temp_4275', + 'temp_4525', + 'temp_4775', + 'temp_5025', + 'temp_5275', + 'temp_5525', + 'temp_5825', + 'temp_6125', + 'fh', + 'MLD2', + 'ssh', + 'salt_2', + 'salt_7', + 'salt_15', + 'salt_25', + 'salt_35', + 'salt_45', + 'salt_55', + 'salt_65', + 'salt_75', + 'salt_85', + 'salt_95', + 'salt_107', + 'salt_125', + 'salt_147', + 'salt_175', + 'salt_210', + 'salt_255', + 'salt_310', + 'salt_375', + 'salt_450', + 'salt_535', + 'salt_630', + 'salt_735', + 'salt_850', + 'salt_975', + 'salt_1110', + 'salt_1255', + 'salt_1415', + 'salt_1600', + 'salt_1810', + 'salt_2035', + 'salt_2275', + 'salt_2525', + 'salt_2775', + 'salt_3025', + 'salt_3275', + 'salt_3525', + 'salt_3775', + 'salt_4025', + 'salt_4275', + 'salt_4525', + 'salt_4775', + 'salt_5025', + 'salt_5275', + 'salt_5525', + 'salt_5825', + 'salt_6125', + 'evap', + 'prec', + 'fw', + 'a_ice', + 'm_ice', + 'w_0', + 'w_5', + 'w_10', + 'w_20', + 'w_30', + 'w_40', + 'w_50', + 'w_60', + 'w_70', + 'w_80', + 'w_90', + 'w_100', + 'w_115', + 'w_135', + 'w_160', + 'w_190', + 'w_230', + 'w_280', + 'w_340', + 'w_410', + 'w_490', + 'w_580', + 'w_680', + 'w_790', + 'w_910', + 'w_1040', + 'w_1180', + 'w_1330', + 'w_1500', + 'w_1700', + 'w_1920', + 'w_2150', + 'w_2400', + 'w_2650', + 'w_2900', + 'w_3150', + 'w_3400', + 'w_3650', + 'w_3900', + 'w_4150', + 'w_4400', + 'w_4650', + 'w_4900', + 'w_5150', + 'w_5400', + 'w_5650', + 'w_6000', + 'w_6250', + 'sst', + 'swr', + 'snow', + ], + mins: [ + 4.09146000e+00, + -2.41106920e+00, + -2.39218020e+00, + -2.37865500e+00, + -2.36380770e+00, + -2.38846900e+00, + -2.67149330e+00, + -3.01089100e+00, + -3.22548580e+00, + -3.18317680e+00, + -2.86138870e+00, + -3.13428660e+00, + -3.40982400e+00, + -2.03733470e+00, + -2.04340700e+00, + -2.04870400e+00, + -2.05147500e+00, + -2.05832770e+00, + -2.05831770e+00, + -2.03724000e+00, + -2.02422300e+00, + -2.02393820e+00, + -1.95850520e+00, + -1.93859950e+00, + -2.01038120e+00, + -1.34405610e+00, + -1.25056300e+00, + -1.09434010e+00, + -1.11406400e+00, + -1.15315320e+00, + -1.18406460e+00, + -1.22324170e+00, + -1.24240460e+00, + -1.26277220e+00, + -1.27399710e+00, + -1.28291320e+00, + -1.28115510e+00, + -1.08344980e+00, + -1.07199940e+00, + -1.05720320e+00, + -1.03676950e+00, + -8.48968200e-01, + -8.52999000e-01, + -8.75092600e-01, + -8.21526050e-01, + -7.96533000e-01, + -2.32363970e-01, + 0.00000000e+00, + -6.26905640e+02, + -5.53550000e+03, + -3.07803300e+00, + 4.09146000e+00, + 4.28872780e+00, + 4.58653160e+00, + 4.77420900e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + 0.00000000e+00, + -4.39320930e-07, + 0.00000000e+00, + -1.23689515e-05, + 0.00000000e+00, + 0.00000000e+00, + -5.74311750e-18, + -6.56508800e-04, + 
-1.28546220e-03, + -2.41589690e-03, + -3.66671360e-03, + -3.15646830e-03, + -4.52455130e-03, + -4.55176920e-03, + -5.22204630e-03, + -6.60856700e-03, + -6.48492900e-03, + -7.06707500e-03, + -9.23417500e-03, + -1.08425280e-02, + -1.07143110e-02, + -1.03876630e-02, + -9.77122000e-03, + -1.43380710e-02, + -1.31979170e-02, + -9.79206150e-03, + -1.07046380e-02, + -1.25517870e-02, + -2.09608670e-02, + -1.89982430e-02, + -1.70383380e-02, + -1.55737870e-02, + -2.17656160e-02, + -2.28526100e-02, + -2.45442070e-02, + -2.49551890e-02, + -2.51705870e-02, + -2.21153500e-02, + -1.79000940e-02, + -1.56068300e-02, + -1.62086620e-02, + -2.40087710e-02, + -2.48503240e-02, + -2.43637770e-02, + -2.30394680e-02, + -2.62292210e-02, + -2.05512920e-02, + -8.65730900e-03, + -1.40088440e-02, + -9.31009200e-03, + -5.66811500e-03, + -4.10096530e-03, + 0.00000000e+00, + 0.00000000e+00, + -2.41106920e+00, + 2.20133280e-14, + 0.00000000e+00, + ], + maxs: [ + 4.37069630e+01, + 3.68182500e+01, + 3.72615500e+01, + 3.76355060e+01, + 3.80740050e+01, + 3.55034500e+01, + 3.40421900e+01, + 3.30923840e+01, + 3.24824800e+01, + 3.19715120e+01, + 3.18336450e+01, + 3.14685290e+01, + 3.13181800e+01, + 3.09531560e+01, + 3.02340970e+01, + 2.88998410e+01, + 2.66405180e+01, + 2.50840970e+01, + 2.38184800e+01, + 2.22599090e+01, + 2.21513420e+01, + 2.21285210e+01, + 2.19847220e+01, + 2.18424590e+01, + 2.18029000e+01, + 2.16428990e+01, + 1.58158040e+01, + 1.57923740e+01, + 1.57656800e+01, + 1.56924360e+01, + 1.56298900e+01, + 1.55851900e+01, + 1.55820920e+01, + 1.55792720e+01, + 1.55735910e+01, + 1.55599890e+01, + 1.55507145e+01, + 1.55137090e+01, + 9.87976300e+00, + 9.86550400e+00, + 4.81351000e+00, + 4.78990400e+00, + 4.75826550e+00, + 3.29253940e+00, + 2.75645040e+00, + 2.20648530e+00, + 2.12301680e+00, + 0.00000000e+00, + 4.03002340e+03, + -6.00554560e+00, + 2.52861120e+00, + 4.37069630e+01, + 4.38082430e+01, + 4.39297640e+01, + 4.40281450e+01, + 4.31920550e+01, + 4.31444100e+01, + 4.30970000e+01, + 4.30733950e+01, + 4.30568470e+01, + 4.30267700e+01, + 4.30243840e+01, + 4.30149600e+01, + 4.30066600e+01, + 4.30003170e+01, + 4.29928400e+01, + 4.29904000e+01, + 4.29873800e+01, + 4.29819150e+01, + 4.29604600e+01, + 4.29592300e+01, + 4.29575200e+01, + 4.29506600e+01, + 4.29448240e+01, + 4.29380200e+01, + 4.29241030e+01, + 4.10297780e+01, + 4.10298700e+01, + 4.10296800e+01, + 4.10135570e+01, + 4.09971100e+01, + 4.09967500e+01, + 4.09970970e+01, + 4.09952900e+01, + 4.09854770e+01, + 4.09843670e+01, + 4.09824500e+01, + 4.09781570e+01, + 3.51021270e+01, + 3.50952400e+01, + 3.50796200e+01, + 3.50765700e+01, + 3.50724000e+01, + 3.49525260e+01, + 3.49221500e+01, + 3.49106180e+01, + 3.49053880e+01, + 0.00000000e+00, + 1.01811670e-07, + 2.52546200e-06, + 3.83766160e-06, + 1.00000000e+00, + 2.59364280e+01, + 5.63939150e-18, + 5.47847400e-04, + 9.59915460e-04, + 1.44477650e-03, + 2.47891220e-03, + 2.96681900e-03, + 2.30192770e-03, + 2.73348340e-03, + 2.85711000e-03, + 8.20557000e-03, + 6.50213940e-03, + 4.98062740e-03, + 4.13050040e-03, + 6.02416230e-03, + 5.18272440e-03, + 4.01948000e-03, + 6.63167600e-03, + 5.29185700e-03, + 5.57743400e-03, + 6.52509650e-03, + 7.33623470e-03, + 6.33935200e-03, + 8.13931800e-03, + 1.43899005e-02, + 9.61135700e-03, + 7.88523300e-03, + 1.00830910e-02, + 1.00684500e-02, + 1.11337010e-02, + 1.23745430e-02, + 1.33212020e-02, + 2.01495360e-02, + 1.91381420e-02, + 1.83346660e-02, + 1.64315960e-02, + 1.40752450e-02, + 1.04268790e-02, + 1.28377580e-02, + 1.83274280e-02, + 1.42376670e-02, + 9.72497700e-03, + 8.48415500e-03, + 
1.54165765e-02, + 1.05178640e-02, + 5.65414880e-03, + 4.86245000e-03, + 0.00000000e+00, + 0.00000000e+00, + 3.68182500e+01, + 4.02569950e+02, + 7.66055100e-07, + ], + means : [ + 33.48014831542969, + 12.473414421081543, + 12.414287567138672, + 12.063199996948242, + 11.513513565063477, + 10.672279357910156, + 10.141136169433594, + 9.73779582977295, + 9.421512603759766, + 9.156067848205566, + 8.919069290161133, + 8.689873695373535, + 8.41275691986084, + 8.040079116821289, + 7.596518039703369, + 7.090358257293701, + 6.59499454498291, + 6.138658046722412, + 5.755801677703857, + 5.3815507888793945, + 4.940726280212402, + 4.450194358825684, + 4.005489349365234, + 3.5275967121124268, + 3.089005470275879, + 2.6832332611083984, + 2.340416431427002, + 2.0679359436035156, + 1.7873553037643433, + 1.5699480772018433, + 1.3911718130111694, + 1.2604461908340454, + 1.1254847049713135, + 0.9850493669509888, + 0.8547093868255615, + 0.7435904741287231, + 0.6580367684364319, + 0.5557888746261597, + 0.45927414298057556, + 0.377909392118454, + 0.30526629090309143, + 0.2263249158859253, + 0.15983007848262787, + 0.1079963892698288, + 0.06327680498361588, + 0.03446195274591446, + 0.01053240429610014, + 0.0, + 60.779296875, + -87.93943786621094, + -0.3295230567455292, + 32.25782775878906, + 32.27289962768555, + 32.34180450439453, + 32.395294189453125, + 31.914072036743164, + 31.686046600341797, + 30.00705337524414, + 29.36052703857422, + 28.87939453125, + 28.518390655517578, + 28.06410789489746, + 27.616165161132812, + 27.25494956970215, + 26.878986358642578, + 26.44842529296875, + 25.863618850708008, + 25.376853942871094, + 24.852371215820312, + 24.371938705444336, + 23.873828887939453, + 23.426124572753906, + 23.029767990112305, + 22.700708389282227, + 22.419292449951172, + 22.155311584472656, + 21.883447647094727, + 21.570472717285156, + 21.2353515625, + 20.818880081176758, + 20.404722213745117, + 19.87188720703125, + 19.278139114379883, + 18.58622169494629, + 17.790977478027344, + 16.779687881469727, + 15.639055252075195, + 14.172965049743652, + 12.40196418762207, + 10.699524879455566, + 9.042342185974121, + 7.034385681152344, + 5.251721382141113, + 3.59009051322937, + 2.0695559978485107, + 1.0921233892440796, + 0.3307676911354065, + 0.0, + -2.629090545269719e-08, + 2.6593310664679848e-08, + -9.050306282176734e-09, + 0.2023606151342392, + 0.3298293352127075, + -1.6439100753589446e-25, + 5.6956853455858436e-08, + 1.198608003960544e-07, + 1.898294783586607e-07, + 1.9758162750349584e-07, + 1.7512732597424474e-07, + 1.3120295250246272e-07, + 9.166119951942164e-08, + 5.662713675747e-08, + 3.120301883541288e-08, + -6.447005596044164e-09, + -3.549177307604623e-08, + -7.173019156425653e-08, + -1.0555431373404645e-07, + -1.3021882239172555e-07, + -1.7206454572260554e-07, + -2.259940288240614e-07, + -2.7644495048662066e-07, + -3.264613326336985e-07, + -3.4550032523839036e-07, + -3.140555975278403e-07, + -3.2974125474538596e-07, + -3.32498217403554e-07, + -3.5670544207278e-07, + -3.649873292488337e-07, + -3.529927994350146e-07, + -3.0973208708928723e-07, + -2.143078887684169e-07, + -1.4938072467884922e-07, + -1.0373175030053972e-07, + -5.739519082226252e-08, + 8.989043465135182e-08, + 1.1984339209902828e-07, + 1.5018591170701256e-07, + 1.4299101280812465e-07, + 1.841902701471554e-07, + 1.700528144965574e-07, + 1.6666346880356286e-07, + 1.4664085767890356e-07, + 1.5999268043742632e-07, + 1.1862257309758206e-07, + 7.032692650454919e-08, + 5.7671957875982116e-08, + 3.9003452911856584e-08, + 2.9826960457057794e-08, + 
6.24445783969918e-09, + 0.0, + 0.0, + 12.030770301818848, + 133.3340301513672, + 5.16723908106087e-09, + ], + stds: [ + 2.581596851348877, + 11.79872989654541, + 11.765914916992188, + 11.588679313659668, + 11.331280708312988, + 11.030865669250488, + 10.773600578308105, + 10.455321311950684, + 10.130072593688965, + 9.830999374389648, + 9.5009126663208, + 9.155549049377441, + 8.799298286437988, + 8.380084991455078, + 7.869012832641602, + 7.217276096343994, + 6.4266276359558105, + 5.9623332023620605, + 5.595914363861084, + 5.275561332702637, + 4.869168281555176, + 4.327758312225342, + 3.9097366333007812, + 3.4728970527648926, + 2.987152338027954, + 2.6568620204925537, + 2.3307058811187744, + 2.114126205444336, + 1.9889605045318604, + 1.8327451944351196, + 1.719393014907837, + 1.628853440284729, + 1.5240623950958252, + 1.371037483215332, + 1.193272352218628, + 1.07024347782135, + 0.9300109148025513, + 0.8403263688087463, + 0.7115577459335327, + 0.6486182808876038, + 0.5628896951675415, + 0.48999306559562683, + 0.4170469641685486, + 0.3433611989021301, + 0.2668067216873169, + 0.19668422639369965, + 0.10968272387981415, + 0.0, + 99.74605560302734, + 331.0096130371094, + 0.7397753000259399, + 2.835813045501709, + 2.7967166900634766, + 2.653770923614502, + 2.5984039306640625, + 5.087889671325684, + 5.889260292053223, + 9.266444206237793, + 10.276013374328613, + 10.91092300415039, + 11.30087661743164, + 11.746476173400879, + 12.308387756347656, + 12.8717622756958, + 13.35154914855957, + 13.687381744384766, + 14.264633178710938, + 14.656915664672852, + 14.595285415649414, + 14.82186508178711, + 14.996237754821777, + 15.547465324401855, + 16.139122009277344, + 16.516014099121094, + 16.671541213989258, + 16.80963134765625, + 16.908939361572266, + 17.077251434326172, + 17.285608291625977, + 17.259021759033203, + 17.707401275634766, + 17.85293197631836, + 16.925962448120117, + 16.88168716430664, + 16.561243057250977, + 16.87749671936035, + 17.02285385131836, + 18.063684463500977, + 17.293697357177734, + 15.42487907409668, + 15.315444946289062, + 13.349733352661133, + 11.848274230957031, + 10.155359268188477, + 8.062213897705078, + 5.966065883636475, + 3.3561525344848633, + 0.0, + 2.5878895471009855e-08, + 5.285275150868074e-08, + 1.291050608642763e-07, + 0.3729356527328491, + 0.7975702285766602, + 5.543445871547588e-20, + 5.588115072896471e-06, + 1.0335589649912436e-05, + 1.6178701116587035e-05, + 2.1813944840687327e-05, + 2.4752804165473208e-05, + 2.708939791773446e-05, + 2.8637374271056615e-05, + 2.992037479998544e-05, + 3.103152266703546e-05, + 3.215543256374076e-05, + 3.329905666760169e-05, + 3.47946843248792e-05, + 3.6699773772852495e-05, + 3.9117629057727754e-05, + 4.2478022805880755e-05, + 4.7080098738661036e-05, + 5.210151357459836e-05, + 5.428850636235438e-05, + 5.756724567618221e-05, + 5.879401942365803e-05, + 6.041592132532969e-05, + 6.493082764791325e-05, + 6.970097456360236e-05, + 7.158316293498501e-05, + 7.459081098204479e-05, + 7.53950298530981e-05, + 7.889988773968071e-05, + 8.203001925721765e-05, + 8.323073416249827e-05, + 8.86907655512914e-05, + 9.06201166799292e-05, + 9.152557322522625e-05, + 8.978998812381178e-05, + 9.129796671913937e-05, + 8.702294144313782e-05, + 8.08618133305572e-05, + 7.53730782889761e-05, + 6.788795144530013e-05, + 5.901400072616525e-05, + 5.052442429587245e-05, + 4.2226140067214146e-05, + 3.8516820495715365e-05, + 3.0264105589594692e-05, + 2.31855119636748e-05, + 1.593790511833504e-05, + 0.0, + 0.0, + 11.86808967590332, + 93.89717102050781, + 
1.530931825755033e-08, + ], + tendencies: + { + means: [ + 1.08553204e-05, + -6.34894779e-05, + -6.69420158e-05, + -3.07377215e-05, + 3.05101772e-06, + 3.33237738e-06, + 1.27289437e-05, + 1.51596129e-05, + 8.67582730e-06, + 2.97243591e-06, + 1.73679550e-06, + 2.16664896e-06, + 2.82206596e-06, + 5.20396614e-06, + 7.64287000e-06, + 7.04386628e-06, + 7.15478141e-06, + 7.81167799e-06, + 8.95198290e-06, + 1.03445182e-05, + 1.14928987e-05, + 1.16608991e-05, + 1.08678530e-05, + 9.62668587e-06, + 8.35678681e-06, + 6.81451044e-06, + 5.24561986e-06, + 3.99943165e-06, + 3.39644995e-06, + 3.17202272e-06, + 3.25562247e-06, + 3.14789546e-06, + 2.83019184e-06, + 2.70389755e-06, + 2.40527264e-06, + 2.12625332e-06, + 1.90859279e-06, + 1.72040210e-06, + 1.57147062e-06, + 1.47484276e-06, + 1.30437500e-06, + 1.05439956e-06, + 7.45774557e-07, + 4.99327443e-07, + 2.53021704e-07, + 1.13835162e-07, + 2.70807528e-08, + 0.00000000e+00, + -7.30576607e-04, + -8.11467493e-04, + -1.86661550e-06, + 1.08553204e-05, + 1.65893883e-05, + 8.63067304e-06, + 4.56850889e-06, + 1.73315183e-06, + 7.41842714e-07, + 4.18686209e-07, + -1.78602988e-07, + -7.02762024e-07, + -4.25762145e-07, + -8.98618098e-08, + -2.31073792e-08, + -8.11949773e-08, + -2.39149870e-07, + -3.72724056e-07, + -4.51813404e-07, + -3.89285069e-07, + -1.78390674e-07, + 3.14527065e-08, + 1.81721265e-07, + 2.89911220e-07, + 3.65973832e-07, + 3.66448522e-07, + 3.56933856e-07, + 3.53216137e-07, + 3.46135764e-07, + 3.63746855e-07, + 3.82965282e-07, + 4.11476363e-07, + 3.92079500e-07, + 3.43763535e-07, + 3.17899272e-07, + 2.53398892e-07, + 1.79154900e-07, + 1.19000905e-07, + 8.71880004e-08, + 5.58900542e-08, + 3.97198743e-08, + 3.62852272e-08, + 3.47246509e-08, + 3.02578456e-08, + 2.39671724e-08, + 1.64907592e-08, + 8.95730218e-09, + 3.23883818e-09, + 1.29730125e-11, + 0.00000000e+00, + 4.97866363e-14, + -3.38531380e-13, + -6.79781313e-13, + 2.54481305e-07, + 5.45064176e-06, + 2.15775916e-26, + -1.90194888e-12, + -2.68584673e-12, + -5.21627272e-12, + -6.73103439e-12, + -7.77169258e-12, + -5.73951773e-12, + -8.35342814e-13, + -5.11168440e-13, + 1.11789144e-12, + -2.76998321e-12, + -2.25520789e-12, + -2.10599926e-12, + -8.74748014e-13, + -1.70833430e-12, + 1.51000390e-12, + 5.00886054e-12, + 7.11758060e-12, + 1.12511460e-12, + -6.74211640e-12, + -5.51387610e-12, + -9.09105574e-12, + -1.42778144e-11, + -2.44861867e-11, + -3.46059256e-11, + -3.12475830e-11, + -4.17807301e-11, + -3.29290520e-11, + -2.35121529e-11, + -2.60648663e-11, + -3.02904494e-11, + -4.21448825e-11, + -3.27247449e-11, + -2.79798470e-11, + -3.21379157e-11, + -2.05951544e-11, + -2.26240682e-11, + -2.56869696e-11, + -1.63022737e-11, + 3.35932079e-13, + 2.72544909e-12, + 6.45708783e-12, + 3.98133984e-12, + 9.85838639e-12, + 1.72643888e-12, + -1.02497249e-12, + 0.00000000e+00, + 0.00000000e+00, + -6.34894779e-05, + 9.32698043e-04, + -8.87397536e-14, + ], + stds: [ + 7.58071768e-02, + 1.67285587e-01, + 1.69091876e-01, + 1.77923334e-01, + 1.84006827e-01, + 1.81242321e-01, + 1.62556863e-01, + 1.46921227e-01, + 1.40979379e-01, + 1.39628987e-01, + 1.38738124e-01, + 1.36358445e-01, + 1.29100901e-01, + 1.25123787e-01, + 1.30129589e-01, + 1.24200284e-01, + 8.41935609e-02, + 5.03735837e-02, + 3.83453732e-02, + 3.38089131e-02, + 3.45381368e-02, + 3.44053664e-02, + 2.95726569e-02, + 2.43313445e-02, + 2.10946259e-02, + 1.90966766e-02, + 1.67829499e-02, + 1.38513566e-02, + 1.10037776e-02, + 8.31656546e-03, + 6.28047282e-03, + 5.50921141e-03, + 4.96020579e-03, + 4.41643290e-03, + 3.99295513e-03, + 3.51237552e-03, + 
2.87235715e-03, + 2.21056302e-03, + 1.50663563e-03, + 1.07773257e-03, + 8.35362020e-04, + 5.88152841e-04, + 3.47463094e-04, + 1.83104496e-04, + 7.53736016e-05, + 2.77308005e-05, + 8.03344841e-06, + 0.00000000e+00, + 5.62015403e+01, + 4.74338260e+01, + 4.11327321e-02, + 7.58071768e-02, + 7.74850675e-02, + 6.74034451e-02, + 5.52706634e-02, + 4.20113581e-02, + 3.46694754e-02, + 2.86898274e-02, + 2.47632023e-02, + 2.17464646e-02, + 1.93649273e-02, + 1.76384926e-02, + 1.56208222e-02, + 1.38653655e-02, + 1.21241274e-02, + 1.00463827e-02, + 7.90936193e-03, + 6.23783001e-03, + 4.69812362e-03, + 3.42273575e-03, + 2.66965570e-03, + 2.10038337e-03, + 1.67487243e-03, + 1.40081320e-03, + 1.19380134e-03, + 1.05158146e-03, + 1.01770152e-03, + 9.11563541e-04, + 7.31161976e-04, + 5.85867878e-04, + 4.29299290e-04, + 3.39338152e-04, + 2.94176512e-04, + 2.78177227e-04, + 2.76891027e-04, + 2.71223424e-04, + 2.34959765e-04, + 1.86160812e-04, + 1.28262676e-04, + 8.83687196e-05, + 7.00725306e-05, + 5.17652926e-05, + 3.13615767e-05, + 1.67321030e-05, + 6.99303724e-06, + 2.81070606e-06, + 9.36067093e-07, + 0.00000000e+00, + 1.31542652e-08, + 4.82336216e-08, + 7.96060822e-08, + 2.57178575e-02, + 3.62428211e-02, + 7.91599013e-20, + 5.43747321e-06, + 1.00660909e-05, + 1.59271056e-05, + 2.13971675e-05, + 2.48596177e-05, + 2.74284559e-05, + 2.94233858e-05, + 3.11118620e-05, + 3.25169211e-05, + 3.38173806e-05, + 3.53307026e-05, + 3.71240091e-05, + 3.91282716e-05, + 4.14555355e-05, + 4.44758707e-05, + 4.77133000e-05, + 5.03959913e-05, + 5.29620883e-05, + 5.64979735e-05, + 5.90113137e-05, + 6.13343207e-05, + 6.36615884e-05, + 6.68561204e-05, + 7.02538706e-05, + 7.47736543e-05, + 7.87593232e-05, + 8.36902591e-05, + 8.99506920e-05, + 9.51723231e-05, + 9.85544992e-05, + 1.01488850e-04, + 1.00709723e-04, + 9.90555788e-05, + 9.71823313e-05, + 9.15134108e-05, + 8.25659522e-05, + 7.32601281e-05, + 6.33998453e-05, + 5.31255151e-05, + 4.16993135e-05, + 3.24094498e-05, + 2.52584493e-05, + 1.83393181e-05, + 1.23434070e-05, + 7.07488706e-06, + 0.00000000e+00, + 0.00000000e+00, + 1.67285587e-01, + 3.45016937e+01, + 1.46953155e-08, + ], + }, + }, + + geometry: [-180, 180, -90, 90], + + dataset: { + // This contains a set of files from the years 2000-2209, each for one year + // example: ocean_node/ocean_node_2000.zarr + dataset_name: 'ocean_node/ocean_node_{year}.zarr', + type: 'application/zarr', + description: 'Ocean node component of the AWI IFS-FESOM coupled model (OpenIFS CY43R3). Configured on an unstructured grid with 126,858 nodes', + locations: [common.hpc.jsc], + size: '3.4 TB', + inodes: '154,452', + roles: ['data'], + }, +} diff --git a/stac/imerg.jsonnet b/stac/imerg.jsonnet new file mode 100644 index 000000000..56c733d32 --- /dev/null +++ b/stac/imerg.jsonnet @@ -0,0 +1,49 @@ +local common = import 'common.jsonnet'; + +{ + name: 'IMERG', + filename: 'imerg.json', + description: "NASA's Integrated Multi-satellitE Retrievals for GPM (IMERG) product combines information from the GPM satellite constellation to estimate precipitation over the majority of the Earth's surface. 
", + title: 'IMERG', + unique_id: '5', + start_datetime: '1998-01-01T06:00:00', + end_datetime: '2024-07-31T18:00:00', + frequency: '6h', + fixed_timesteps: 'True', + keywords: [ + 'atmosphere', + 'precipitation', + 'reanalysis', + 'global', + ], + providers: [ + common.providers.ecmwf_host, + common.providers.nasa, + ], + processing_level: 'NA', + + variables: { + names: ['tp'], + mins: [0], + maxs: [0.814545], + means: [0.00067628], + stds: [0.00326012], + tendencies: + { + means: [-6.54337427e-10], + stds: [0.00350661], + }, + }, + + geometry: [-180, 180, -90, 90], + + dataset: { + dataset_name: 'nasa-imerg-grib-n320-1998-2024-6h-v1.zarr', + type: 'application/vnd+zarr', + description: 'Anemoi dataset', + locations: [common.hpc.hpc2020, common.hpc.ewc, common.hpc.jsc], + size: '18 GB', + inodes: '38,966', + roles: ['data'], + }, +} diff --git a/stac/jsons/abigoes.json b/stac/jsons/abigoes.json new file mode 100644 index 000000000..f3fdc7584 --- /dev/null +++ b/stac/jsons/abigoes.json @@ -0,0 +1,486 @@ +{ + "assets": { + "RP_ABI-L1b-RadF-M6C*_G16_s*_e*_c*.nc": { + "description": "Observation dataset", + "href": "RP_ABI-L1b-RadF-M6C*_G16_s*_e*_c*.nc", + "inodes": "5.426.532", + "locations": [ + "hpc2020" + ], + "roles": [ + "data" + ], + "size": "230.8 TB", + "title": "RP_ABI-L1b-RadF-M6C*_G16_s*_e*_c*.nc", + "type": "application/vnd+netcdf" + } + }, + "bbox": [ + -156, + -81, + 6, + 81 + ], + "geometry": { + "coordinates": [ + [ + [ + -156, + -81 + ], + [ + -156, + 81 + ], + [ + 6, + 81 + ], + [ + 6, + -81 + ], + [ + -156, + -81 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.ABI-GOES16", + "properties": { + "description": "The Advanced Baseline Imager (ABI) on GOES16 (Geostationary Operational Environmental Satellite 16) provides high-resolution, multispectral imagery for real-time weather monitoring and forecasting. 
GOES-16 is also called GOES-East as it covers the Eastern side of the Americas.", + "end_datetime": "2024-12-31 23:50:20", + "fixed_timesteps": "True", + "frequency": "10min", + "keywords": [ + "atmosphere", + "observation", + "geostationary", + "satellite" + ], + "name": "ABI-GOES16", + "processing_level": "Operational L1b", + "providers": [ + { + "name": "NASA", + "roles": [ + "provider" + ], + "url": "https://www.nasa.gov" + }, + { + "name": "NASA", + "roles": [ + "processor" + ], + "url": "https://www.nasa.gov" + } + ], + "start_datetime": "2017-12-17T00:00:41", + "title": "ABI-GOES16", + "unique_id": "13", + "variables": { + "DQF": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "Rad": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "a_h_NRTH": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "algorithm_dynamic_input_data_container": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "algorithm_product_version_container": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "b_h_NRTH": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "band_id": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "band_wavelength": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "band_wavelength_star_look": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "earth_sun_distance_anomaly_in_AU": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "esun": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "focal_plane_temperature_threshold_decreasing": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "focal_plane_temperature_threshold_exceeded_count": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "focal_plane_temperature_threshold_increasing": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "geospatial_lat_lon_extent": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "goes_imager_projection": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "kappa0": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "max_radiance_value_of_valid_pixels": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "maximum_focal_plane_temperature": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "mean_radiance_value_of_valid_pixels": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + 
"min_radiance_value_of_valid_pixels": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "missing_pixel_count": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "nominal_satellite_height": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "nominal_satellite_subpoint_lat": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "nominal_satellite_subpoint_lon": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "percent_uncorrectable_L0_errors": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "planck_bc1": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "planck_bc2": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "planck_fk1": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "planck_fk2": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "processing_parm_version_container": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "reprocessing_version": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "saturated_pixel_count": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "star_id": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "std_dev_radiance_value_of_valid_pixels": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "t": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "t_star_look": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time_bounds": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time_bounds_rows": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time_bounds_swaths": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "undersaturated_pixel_count": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "valid_pixel_count": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "x": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "x_image": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "x_image_bounds": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "y": { + "max": "NA", + "mean": "NA", + "min": "NA", + 
"std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "y_image": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "y_image_bounds": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "yaw_flip_flag": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/catalogue.json b/stac/jsons/catalogue.json new file mode 100644 index 000000000..291297b4a --- /dev/null +++ b/stac/jsons/catalogue.json @@ -0,0 +1,120 @@ +{ + "description": "The data catalogue of the WeatherGenerator project", + "id": "weathergen", + "links": [ + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/catalogue.json", + "rel": "root", + "title": "The WeatherGenerator data server", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/catalogue.json", + "rel": "self", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/era5v8.json", + "rel": "child", + "title": "ERA5", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/opera.json", + "rel": "child", + "title": "OPERA", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/cerra.json", + "rel": "child", + "title": "CERRA", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/seviri.json", + "rel": "child", + "title": "SEVIRI", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/imerg.json", + "rel": "child", + "title": "IMERG", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/npp-atms.json", + "rel": "child", + "title": "NPP-ATMS", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/synop.json", + "rel": "child", + "title": "SYNOP", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/metopa.json", + "rel": "child", + "title": "Metop-A, MHS", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/metopb.json", + "rel": "child", + "title": "Metop-B, MHS", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/fy3a.json", + "rel": "child", + "title": "FY-3A, MWHS", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/fy3b.json", + "rel": "child", + "title": "FY-3B, MWHS", + "type": "application/json" + }, + { + "href": 
"https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/fy3c.json", + "rel": "child", + "title": "FY-3C, MWHS", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/abigoes.json", + "rel": "child", + "title": "ABI-GOES16", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/ifs-fesom_atmos.json", + "rel": "child", + "title": "AWI IFS-FESOM Coupled Climate Model (Atmos)", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/ifs-fesom_ocean_elem.json", + "rel": "child", + "title": "AWI IFS-FESOM Coupled Climate Model (Ocean element)", + "type": "application/json" + }, + { + "href": "https://raw.githubusercontent.com/ecmwf/WeatherGenerator/refs/heads/develop/stac/jsons/ifs-fesom_ocean_node.json", + "rel": "child", + "title": "AWI IFS-FESOM Coupled Climate Model (Ocean node)", + "type": "application/json" + } + ], + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json" + ], + "stac_version": "1.0.0", + "title": "The WeatherGenerator data catalogue", + "type": "Catalog" +} diff --git a/stac/jsons/cerra.json b/stac/jsons/cerra.json new file mode 100644 index 000000000..06a9d8f05 --- /dev/null +++ b/stac/jsons/cerra.json @@ -0,0 +1,794 @@ +{ + "assets": { + "cerra-rr-an-oper-0001-mars-5p5km-1984-2020-6h-v2-hmsi.zarr": { + "description": "Anemoi dataset", + "href": "cerra-rr-an-oper-0001-mars-5p5km-1984-2020-6h-v2-hmsi.zarr", + "inodes": "53,192", + "locations": [ + "hpc2020", + "European Weather Cloud", + "marenostrum5", + "leonardo" + ], + "roles": [ + "data" + ], + "size": "9 TB", + "title": "cerra-rr-an-oper-0001-mars-5p5km-1984-2020-6h-v2-hmsi.zarr", + "type": "application/vnd+zarr" + } + }, + "bbox": [ + -15, + 20, + 32, + 60 + ], + "geometry": { + "coordinates": [ + [ + [ + -15, + 20 + ], + [ + -15, + 60 + ], + [ + 32, + 60 + ], + [ + 32, + 20 + ], + [ + -15, + 20 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.CERRA", + "properties": { + "description": "The Copernicus European Regional Reanalysis (CERRA), developed under the Copernicus Climate Change Service (C3S), provides a high-resolution reanalysis of atmospheric conditions over Europe. 
Covering the period from 1984 onward, CERRA delivers hourly data at a spatial resolution of 5.5 km, capturing fine-scale climate and weather patterns with improved detail compared to global reanalyses", + "end_datetime": "2020-12-31T18:00:00", + "fixed_timesteps": "True", + "frequency": "3h", + "keywords": [ + "europe", + "copernicus", + "atmosphere", + "reanalysis" + ], + "name": "CERRA", + "processing_level": "NA", + "providers": [ + { + "name": "Copernicus", + "roles": [ + "provider" + ], + "url": "https://copernicus.eu" + } + ], + "start_datetime": "1984-09-01T06:00:00", + "title": "CERRA", + "unique_id": "2", + "variables": { + "10si_10": { + "max": 52.802784000000003, + "mean": 5.3175967100000001, + "min": 5.6239418899999998e-06, + "std": 3.87283445, + "tendency_mean": 3.7335917599999999e-05, + "tendency_std": 2.0768546300000001 + }, + "10wdir_10": { + "max": 360.00363199999998, + "mean": 182.89016000000001, + "min": 1.27694761e-08, + "std": 108.69388499999999, + "tendency_mean": 0.00015300388000000001, + "tendency_std": 101.122612 + }, + "2t_2": { + "max": 324.44674700000002, + "mean": 284.19674300000003, + "min": 202.712051, + "std": 11.8319326, + "tendency_mean": -0.00015768235600000001, + "tendency_std": 4.1949758700000004 + }, + "al": { + "max": 0.84985792599999999, + "mean": 0.277168156, + "min": 0.052082788200000001, + "std": 0.15610863, + "tendency_mean": 1.1036191900000001e-06, + "tendency_std": 0.016401374100000001 + }, + "cos_julian_day": { + "max": 1, + "mean": 0.0041912836299999997, + "min": -0.99999767500000003, + "std": 0.706383117, + "tendency_mean": 3.0570715100000001e-05, + "tendency_std": 0.00304392616 + }, + "cos_latitude": { + "max": 0.93793565000000001, + "mean": 0.65440285600000003, + "min": 0.25296804299999998, + "std": 0.18253498200000001, + "tendency_mean": 0, + "tendency_std": 0 + }, + "cos_local_time": { + "max": 1, + "mean": -1.8381493000000001e-05, + "min": -1, + "std": 0.70710215600000004, + "tendency_mean": -1.5798793300000001e-05, + "tendency_std": 0.99999811299999997 + }, + "cos_longitude": { + "max": 1, + "mean": 0.89516032899999998, + "min": 0.27387392500000002, + "std": 0.120628899, + "tendency_mean": 0, + "tendency_std": 0 + }, + "cos_solar_zenith_angle": { + "max": 0.99994993200000004, + "mean": 0.21295530400000001, + "min": 0, + "std": 0.28887424499999997, + "tendency_mean": 1.2754348500000001e-06, + "tendency_std": 0.400329659 + }, + "lsm": { + "max": 1, + "mean": 0.52814707100000002, + "min": 0, + "std": 0.49218735400000002, + "tendency_mean": 0, + "tendency_std": 0 + }, + "msl_0": { + "max": 106743.219, + "mean": 101390.159, + "min": 91523.539099999995, + "std": 1049.5981400000001, + "tendency_mean": 0.0041830688599999999, + "tendency_std": 239.40753599999999 + }, + "orog": { + "max": 4001.3596200000002, + "mean": 236.831323, + "min": -411.01538099999999, + "std": 444.18457599999999, + "tendency_mean": 0, + "tendency_std": 0 + }, + "r_100": { + "max": 100.070312, + "mean": 7.6524945999999998, + "min": 0, + "std": 9.1475196899999993, + "tendency_mean": -5.0798742600000003e-05, + "tendency_std": 3.4270484099999998 + }, + "r_1000": { + "max": 100.378998, + "mean": 68.847408400000006, + "min": 0, + "std": 24.363773699999999, + "tendency_mean": -9.4505342800000006e-05, + "tendency_std": 12.588672300000001 + }, + "r_150": { + "max": 100.111328, + "mean": 11.504305199999999, + "min": 0, + "std": 15.379339, + "tendency_mean": -8.0482869000000003e-05, + "tendency_std": 6.2332247799999996 + }, + "r_200": { + "max": 100.169922, + "mean": 
30.435823299999999, + "min": 0, + "std": 29.950786799999999, + "tendency_mean": -7.5801379500000003e-05, + "tendency_std": 15.1787961 + }, + "r_250": { + "max": 100.207031, + "mean": 48.526095400000003, + "min": 0, + "std": 32.956231000000002, + "tendency_mean": -0.00024464975700000001, + "tendency_std": 21.177760200000002 + }, + "r_300": { + "max": 100.232422, + "mean": 54.2775885, + "min": 0, + "std": 32.1014008, + "tendency_mean": 3.2262991499999999e-05, + "tendency_std": 24.899210700000001 + }, + "r_400": { + "max": 100.261719, + "mean": 50.689377899999997, + "min": 0, + "std": 31.9738902, + "tendency_mean": 0.00030539225599999999, + "tendency_std": 26.746146800000002 + }, + "r_50": { + "max": 100.009766, + "mean": 3.24156859, + "min": 0, + "std": 4.8024093099999998, + "tendency_mean": 7.4640353899999994e-05, + "tendency_std": 1.61529075 + }, + "r_500": { + "max": 100.271484, + "mean": 46.350677300000001, + "min": 0, + "std": 31.467841, + "tendency_mean": 0.00022713606099999999, + "tendency_std": 25.610529700000001 + }, + "r_600": { + "max": 100.214844, + "mean": 45.330855800000002, + "min": 0, + "std": 30.7615251, + "tendency_mean": 0.00013657707199999999, + "tendency_std": 24.022820800000002 + }, + "r_700": { + "max": 100.298828, + "mean": 47.5729489, + "min": 0, + "std": 30.229604899999998, + "tendency_mean": 3.5320238500000003e-05, + "tendency_std": 21.955408299999998 + }, + "r_850": { + "max": 100.373047, + "mean": 59.787480199999997, + "min": 0, + "std": 28.772984099999999, + "tendency_mean": 2.5439660400000002e-06, + "tendency_std": 16.811092599999998 + }, + "r_925": { + "max": 100.378601, + "mean": 66.039494399999995, + "min": 0, + "std": 27.205127900000001, + "tendency_mean": 2.5782706100000001e-05, + "tendency_std": 13.3198475 + }, + "rsn": { + "max": 300.00195300000001, + "mean": 254.93422899999999, + "min": 100.133286, + "std": 59.308215300000001, + "tendency_mean": 0.87062393299999996, + "tendency_std": 6.0380466699999999 + }, + "sde": { + "max": 126.113281, + "mean": 0.29423822399999999, + "min": 0, + "std": 1.5239338099999999, + "tendency_mean": 2.9475915099999999e-06, + "tendency_std": 0.020996789599999999 + }, + "sf": { + "max": 115.117188, + "mean": 0.078806177399999996, + "min": 0, + "std": 0.53285960200000004, + "tendency_mean": 2.0521540299999999e-06, + "tendency_std": 0.46077853899999999 + }, + "sin_julian_day": { + "max": 0.99999940399999998, + "mean": -0.0071129263499999998, + "min": -0.99999940399999998, + "std": 0.707781522, + "tendency_mean": 1.7650183999999999e-05, + "tendency_std": 0.0030440187499999999 + }, + "sin_latitude": { + "max": 0.96747463899999997, + "mean": 0.71447687900000001, + "min": 0.34680929799999999, + "std": 0.16721412099999999, + "tendency_mean": 0, + "tendency_std": 0 + }, + "sin_local_time": { + "max": 1, + "mean": -2.5833502100000001e-06, + "min": -1, + "std": 0.70711132899999996, + "tendency_mean": -2.09657059e-05, + "tendency_std": 1.00000183 + }, + "sin_longitude": { + "max": 0.96176558700000003, + "mean": 0.12580657200000001, + "min": -0.84901857400000003, + "std": 0.41025510599999998, + "tendency_mean": 0, + "tendency_std": 0 + }, + "skt": { + "max": 345.04361, + "mean": 285.01406800000001, + "min": 197.41272000000001, + "std": 13.043481999999999, + "tendency_mean": -0.00013545283000000001, + "tendency_std": 7.5492940300000004 + }, + "sp": { + "max": 108649.039, + "mean": 98666.097399999999, + "min": 59310.847699999998, + "std": 5028.0907399999996, + "tendency_mean": 0.0022480789700000002, + "tendency_std": 228.06795199999999 + }, 
+ "t_100": { + "max": 241.00592, + "mean": 212.625405, + "min": 183.557007, + "std": 7.7993835000000002, + "tendency_mean": -3.98069811e-05, + "tendency_std": 1.0392737599999999 + }, + "t_1000": { + "max": 322.72375499999998, + "mean": 285.54027600000001, + "min": 224.763779, + "std": 11.343139300000001, + "tendency_mean": -0.00017757844900000001, + "tendency_std": 2.6040265300000001 + }, + "t_150": { + "max": 244.25904800000001, + "mean": 215.98793599999999, + "min": 186.210373, + "std": 6.3682475199999997, + "tendency_mean": -1.6656223899999999e-05, + "tendency_std": 1.2925127300000001 + }, + "t_200": { + "max": 247.739609, + "mean": 217.948093, + "min": 189.31210300000001, + "std": 6.19094897, + "tendency_mean": -7.6768938199999997e-05, + "tendency_std": 1.84726254 + }, + "t_250": { + "max": 247.744934, + "mean": 221.59674999999999, + "min": 192.90748600000001, + "std": 5.9960495299999996, + "tendency_mean": -0.00010674933, + "tendency_std": 1.6653471200000001 + }, + "t_300": { + "max": 252.01211499999999, + "mean": 228.07882900000001, + "min": 197.630112, + "std": 7.0061161099999998, + "tendency_mean": -0.00016242700700000001, + "tendency_std": 1.3433040999999999 + }, + "t_400": { + "max": 266.41271999999998, + "mean": 242.18318500000001, + "min": 211.168442, + "std": 8.3248681999999992, + "tendency_mean": -0.00020902023600000001, + "tendency_std": 1.4609569099999999 + }, + "t_50": { + "max": 255.196594, + "mean": 213.49439699999999, + "min": 178.60240200000001, + "std": 6.7594463899999999, + "tendency_mean": -0.00017574466400000001, + "tendency_std": 1.1358536800000001 + }, + "t_500": { + "max": 277.65664700000002, + "mean": 253.74952099999999, + "min": 217.51615899999999, + "std": 8.6098696300000004, + "tendency_mean": -0.00020360945499999999, + "tendency_std": 1.4868025600000001 + }, + "t_600": { + "max": 285.74243200000001, + "mean": 262.79004300000003, + "min": 220.50700399999999, + "std": 8.8049586899999994, + "tendency_mean": -0.00019013026800000001, + "tendency_std": 1.39619037 + }, + "t_700": { + "max": 293.58212300000002, + "mean": 270.02826599999997, + "min": 217.64820900000001, + "std": 9.3441051999999996, + "tendency_mean": -0.00018279457599999999, + "tendency_std": 1.344241 + }, + "t_850": { + "max": 310.35400399999997, + "mean": 278.38052900000002, + "min": 227.57612599999999, + "std": 10.5441258, + "tendency_mean": -0.00018701069200000001, + "tendency_std": 1.55029113 + }, + "t_925": { + "max": 317.67141700000002, + "mean": 281.99432300000001, + "min": 226.39804100000001, + "std": 11.080340100000001, + "tendency_mean": -0.00018734042399999999, + "tendency_std": 1.9640287999999999 + }, + "tciwv_0": { + "max": 70.175804099999993, + "mean": 14.5715313, + "min": -1.0585291400000001, + "std": 7.8497021800000004, + "tendency_mean": -0.00018202007200000001, + "tendency_std": 2.8375046899999998 + }, + "tp": { + "max": 306.929688, + "mean": 0.49854976099999998, + "min": 0, + "std": 1.56107664, + "tendency_mean": 6.4939358900000002e-06, + "tendency_std": 1.5805699099999999 + }, + "u_100": { + "max": 75.646759000000003, + "mean": 11.017252300000001, + "min": -42.920074499999998, + "std": 9.7498440599999991, + "tendency_mean": 0.000238088364, + "tendency_std": 2.7352149400000001 + }, + "u_1000": { + "max": 46.818801899999997, + "mean": 0.49507715600000002, + "min": -50.9635544, + "std": 5.3934638399999999, + "tendency_mean": 2.27066292e-05, + "tendency_std": 2.7920494200000001 + }, + "u_150": { + "max": 95.392791700000004, + "mean": 14.8218923, + "min": -47.731502499999998, + 
"std": 12.555163, + "tendency_mean": 0.000170577506, + "tendency_std": 3.1468868400000001 + }, + "u_200": { + "max": 104.401718, + "mean": 15.9973408, + "min": -61.821666700000002, + "std": 15.2662887, + "tendency_mean": 0.000146354452, + "tendency_std": 4.5254725799999997 + }, + "u_250": { + "max": 122.580872, + "mean": 15.2368319, + "min": -74.381637600000005, + "std": 16.439691, + "tendency_mean": 0.000139709981, + "tendency_std": 6.0905664000000002 + }, + "u_300": { + "max": 122.861374, + "mean": 13.5926784, + "min": -72.479377700000001, + "std": 15.832105800000001, + "tendency_mean": 0.000124773021, + "tendency_std": 6.5596256000000004 + }, + "u_400": { + "max": 102.492699, + "mean": 10.570305100000001, + "min": -68.611541700000004, + "std": 12.9832669, + "tendency_mean": 8.6891437200000004e-05, + "tendency_std": 5.5276071399999998 + }, + "u_50": { + "max": 90.208953899999997, + "mean": 5.0872464600000002, + "min": -43.219444299999999, + "std": 10.972227699999999, + "tendency_mean": 0.00035599683699999999, + "tendency_std": 2.8408335400000002 + }, + "u_500": { + "max": 85.962005599999998, + "mean": 8.3290586599999994, + "min": -56.915290800000001, + "std": 10.611242600000001, + "tendency_mean": 6.0790521399999997e-05, + "tendency_std": 4.3233595200000003 + }, + "u_600": { + "max": 75.445190400000001, + "mean": 6.5370667300000003, + "min": -55.551620499999999, + "std": 9.0170195599999996, + "tendency_mean": 3.8396511700000002e-05, + "tendency_std": 3.6405750600000002 + }, + "u_700": { + "max": 62.936683700000003, + "mean": 4.8409100299999999, + "min": -66.263404800000004, + "std": 7.9698805799999999, + "tendency_mean": 3.1026220499999997e-05, + "tendency_std": 3.3514639900000001 + }, + "u_850": { + "max": 60.701660199999999, + "mean": 2.3913615799999999, + "min": -69.202484100000007, + "std": 7.2766142299999998, + "tendency_mean": 4.5526567999999998e-05, + "tendency_std": 3.4343297599999998 + }, + "u_925": { + "max": 58.106208799999997, + "mean": 1.2745901500000001, + "min": -63.512340500000001, + "std": 7.2076213100000004, + "tendency_mean": 4.2689505399999999e-05, + "tendency_std": 3.6090987299999999 + }, + "v_100": { + "max": 58.486339600000001, + "mean": 0.33703578000000001, + "min": -64.596298200000007, + "std": 8.1776344400000003, + "tendency_mean": 3.9449744900000001e-05, + "tendency_std": 2.95625245 + }, + "v_1000": { + "max": 43.191776300000001, + "mean": -0.37845869399999998, + "min": -44.299976299999997, + "std": 5.1314651800000002, + "tendency_mean": 8.5534671900000001e-06, + "tendency_std": 2.9791869599999998 + }, + "v_150": { + "max": 69.895523100000005, + "mean": 0.39326430000000001, + "min": -76.590026899999998, + "std": 10.9739614, + "tendency_mean": 3.9014081200000001e-05, + "tendency_std": 3.4932232600000002 + }, + "v_200": { + "max": 84.528190600000002, + "mean": -0.075963676999999993, + "min": -101.85231, + "std": 14.220357999999999, + "tendency_mean": 4.8648691800000002e-05, + "tendency_std": 5.2349334299999999 + }, + "v_250": { + "max": 90.072357199999999, + "mean": -0.26028902199999998, + "min": -115.096771, + "std": 16.190533599999998, + "tendency_mean": 4.5752832899999999e-05, + "tendency_std": 7.1854345000000004 + }, + "v_300": { + "max": 86.2032928, + "mean": -0.20812677600000001, + "min": -105.506874, + "std": 15.8914613, + "tendency_mean": 4.3722200099999998e-05, + "tendency_std": 7.7427104099999999 + }, + "v_400": { + "max": 86.283691399999995, + "mean": -0.114076301, + "min": -94.46978, + "std": 13.01262, + "tendency_mean": 3.8726617000000003e-05, + 
"tendency_std": 6.4682306799999996 + }, + "v_50": { + "max": 66.838172900000004, + "mean": -0.54391903799999997, + "min": -67.956939700000007, + "std": 6.83899013, + "tendency_mean": 2.36740288e-05, + "tendency_std": 3.1054957600000002 + }, + "v_500": { + "max": 71.250122099999999, + "mean": -0.0506507083, + "min": -80.867630000000005, + "std": 10.4819549, + "tendency_mean": 2.15531982e-05, + "tendency_std": 4.9593611700000002 + }, + "v_600": { + "max": 60.594440499999997, + "mean": 0.0040386865100000002, + "min": -66.203002900000001, + "std": 8.7697239000000007, + "tendency_mean": 1.12621165e-05, + "tendency_std": 4.0530562699999999 + }, + "v_700": { + "max": 65.292716999999996, + "mean": -0.00499432825, + "min": -58.758335099999996, + "std": 7.5542199200000004, + "tendency_mean": 7.8042148299999995e-06, + "tendency_std": 3.6223472600000002 + }, + "v_850": { + "max": 72.592910799999999, + "mean": -0.16088306099999999, + "min": -62.238296499999997, + "std": 6.6025043200000004, + "tendency_mean": 2.0345844699999999e-05, + "tendency_std": 3.6443865899999999 + }, + "v_925": { + "max": 59.066757199999998, + "mean": -0.28350726300000001, + "min": -61.701419799999996, + "std": 6.6181699700000003, + "tendency_mean": 3.0264160000000001e-05, + "tendency_std": 3.8229006000000001 + }, + "z_100": { + "max": 166283.28099999999, + "mean": 158887.15700000001, + "min": 143262.25, + "std": 3404.4652099999998, + "tendency_mean": -0.082858693400000002, + "tendency_std": 180.578498 + }, + "z_1000": { + "max": 4819.1386700000003, + "mean": 1126.81738, + "min": -7098.4296899999999, + "std": 837.777559, + "tendency_mean": 0.0029038624999999998, + "tendency_std": 189.93202600000001 + }, + "z_150": { + "max": 142371.54699999999, + "mean": 133961.29699999999, + "min": 119476.25, + "std": 3608.4684499999998, + "tendency_mean": -0.080871565199999995, + "tendency_std": 211.98136600000001 + }, + "z_200": { + "max": 124159.75, + "mean": 116038.921, + "min": 102385.18799999999, + "std": 3679.8864699999999, + "tendency_mean": -0.077164039099999998, + "tendency_std": 271.02844499999998 + }, + "z_250": { + "max": 109216.508, + "mean": 101985.45600000001, + "min": 89070.710900000005, + "std": 3563.9812499999998, + "tendency_mean": -0.071237014500000001, + "tendency_std": 326.27028000000001 + }, + "z_300": { + "max": 96577.867199999993, + "mean": 90231.363400000002, + "min": 77886.5625, + "std": 3306.6962899999999, + "tendency_mean": -0.064269896300000004, + "tendency_std": 338.56844000000001 + }, + "z_400": { + "max": 75789.804699999993, + "mean": 70829.373200000002, + "min": 59646.925799999997, + "std": 2714.7270699999999, + "tendency_mean": -0.048471916599999998, + "tendency_std": 290.77705300000002 + }, + "z_50": { + "max": 208567.65599999999, + "mean": 201164.886, + "min": 182196.641, + "std": 3751.9689899999998, + "tendency_mean": -0.105326459, + "tendency_std": 192.10885300000001 + }, + "z_500": { + "max": 59125.8125, + "mean": 54939.618999999999, + "min": 45120.265599999999, + "std": 2205.1184499999999, + "tendency_mean": -0.035024242499999997, + "tendency_std": 233.160327 + }, + "z_600": { + "max": 44916.929700000001, + "mean": 41413.027800000003, + "min": 32110.0137, + "std": 1786.2426, + "tendency_mean": -0.024644336700000002, + "tendency_std": 193.620113 + }, + "z_700": { + "max": 32583.123, + "mean": 29610.9251, + "min": 20695.539100000002, + "std": 1431.5929000000001, + "tendency_mean": -0.016261857399999999, + "tendency_std": 172.71374700000001 + }, + "z_850": { + "max": 17218.956999999999, + "mean": 
14298.571400000001, + "min": 5832.8613299999997, + "std": 1013.90711, + "tendency_mean": -0.0057689257499999999, + "tendency_std": 169.112109 + }, + "z_925": { + "max": 10643.168, + "mean": 7481.70813, + "min": -844.32794200000001, + "std": 887.88284199999998, + "tendency_mean": -0.00102279581, + "tendency_std": 177.38832099999999 + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/era5v8.json b/stac/jsons/era5v8.json new file mode 100644 index 000000000..61096500d --- /dev/null +++ b/stac/jsons/era5v8.json @@ -0,0 +1,899 @@ +{ + "assets": { + "aifs-ea-an-oper-0001-mars-o96-1979-2023-6h-v8.zarr": { + "description": "ERA5 data on the O96 HEALPix grid, version 8. Contains tendencies", + "href": "aifs-ea-an-oper-0001-mars-o96-1979-2023-6h-v8.zarr", + "inodes": "65,863", + "locations": [ + "hpc2020", + "European Weather Cloud", + "juwels_booster", + "marenostrum5", + "leonardo" + ], + "roles": [ + "data" + ], + "size": "593 GB", + "title": "aifs-ea-an-oper-0001-mars-o96-1979-2023-6h-v8.zarr", + "type": "application/vnd+zarr" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.ERA5v8", + "properties": { + "description": "ERA5 is a reanalysis dataset produced by ECMWF, providing hourly estimates of a large number of atmospheric, land, and oceanic climate variables.", + "end_datetime": "2023-12-31T18:00:00", + "fixed_timesteps": "True", + "frequency": "6h", + "keywords": [ + "ERA5", + "global", + "atmosphere", + "reanalysis" + ], + "name": "ERA5v8", + "processing_level": "NA", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "provider" + ], + "url": "https://www.ecmwf.int/" + } + ], + "start_datetime": "1979-01-01T00:00:00", + "title": "ERA5", + "unique_id": "1", + "variables": { + "10u": { + "max": 37.293029799999999, + "mean": -0.56074947500000005, + "min": -44.939437900000001, + "std": 5.3987135200000003, + "tendency_mean": 6.4502004799999995e-07, + "tendency_std": 2.1354071100000001 + }, + "10v": { + "max": 86.347732500000006, + "mean": 0.216951956, + "min": -44.315322899999998, + "std": 4.4687826800000003, + "tendency_mean": -3.91051211e-07, + "tendency_std": 2.3981342400000001 + }, + "2d": { + "max": 306.45245399999999, + "mean": 283.074613, + "min": 186.62681599999999, + "std": 15.311397599999999, + "tendency_mean": 9.1301524099999994e-06, + "tendency_std": 1.6949597999999999 + }, + "2t": { + "max": 325.73410000000001, + "mean": 288.06091099999998, + "min": 189.90832499999999, + "std": 15.5242635, + "tendency_mean": 1.7358923099999999e-05, + "tendency_std": 2.7789657000000001 + }, + "cos_julian_day": { + "max": 1, + "mean": 3.2588314600000002e-05, + "min": -0.99999767500000003, + "std": 0.70711829900000001, + "tendency_mean": -6.0320526099999995e-10, + "tendency_std": 0.0030409606099999998 + }, + "cos_latitude": { + "max": 0.99996668099999997, + "mean": 0.79662194600000003, + "min": 0.0124922609, + "std": 0.22745343700000001, + "tendency_mean": 0, + "tendency_std": 0 + }, + "cos_local_time": { + "max": 1, + "mean": 0, + "min": -1, + "std": 0.70710678100000002, + "tendency_mean": -2.6210897499999999e-14, + 
"tendency_std": 1 + }, + "cos_longitude": { + "max": 1, + "mean": 1.6083793e-09, + "min": -1, + "std": 0.70710678100000002, + "tendency_mean": 0, + "tendency_std": 0 + }, + "cp": { + "max": 0.118124008, + "mean": 0.00041876350599999999, + "min": 0, + "std": 0.0012941074, + "tendency_mean": 7.2381894500000004e-10, + "tendency_std": 0.0013488552400000001 + }, + "insolation": { + "max": 1, + "mean": 0.25328790099999998, + "min": 0, + "std": 0.32597986899999998, + "tendency_mean": 6.8558312400000002e-11, + "tendency_std": 0.46838193 + }, + "lsm": { + "max": 1, + "mean": 0.28403935499999999, + "min": 0, + "std": 0.440217572, + "tendency_mean": 0, + "tendency_std": 0 + }, + "msl": { + "max": 107650.75, + "mean": 101148.72500000001, + "min": 91123.5625, + "std": 1080.1919, + "tendency_mean": 0.00053447879399999999, + "tendency_std": 251.70618400000001 + }, + "q_100": { + "max": 5.6691515099999997e-05, + "mean": 2.6807591899999998e-06, + "min": -0.000117661999, + "std": 6.2790892100000002e-07, + "tendency_mean": 3.77797237e-13, + "tendency_std": 1.9462075399999999e-07 + }, + "q_1000": { + "max": 0.031682185799999998, + "mean": 0.0097633646599999992, + "min": 9.9999972699999998e-09, + "std": 0.00585692778, + "tendency_mean": -1.27874185e-09, + "tendency_std": 0.000852648219 + }, + "q_150": { + "max": 0.000103373837, + "mean": 6.6869996800000002e-06, + "min": -9.8826028999999998e-05, + "std": 4.1853891700000001e-06, + "tendency_mean": 1.2518021199999999e-12, + "tendency_std": 1.8380011200000001e-06 + }, + "q_200": { + "max": 0.00081593002, + "mean": 2.7420352400000001e-05, + "min": -8.8366214199999996e-05, + "std": 2.5758731700000001e-05, + "tendency_mean": 1.11469638e-11, + "tendency_std": 1.2318332099999999e-05 + }, + "q_250": { + "max": 0.0021009224, + "mean": 8.16892759e-05, + "min": -0.00024811178399999998, + "std": 8.6241125800000005e-05, + "tendency_mean": 8.6609997000000003e-11, + "tendency_std": 4.2764744400000001e-05 + }, + "q_300": { + "max": 0.0035953393700000002, + "mean": 0.000177448841, + "min": -0.000164085635, + "std": 0.000196058764, + "tendency_mean": 2.53691253e-10, + "tendency_std": 8.9866241799999995e-05 + }, + "q_400": { + "max": 0.0067789703599999999, + "mean": 0.00052421597599999997, + "min": -8.2219601599999994e-05, + "std": 0.00059396949700000004, + "tendency_mean": 1.1347574899999999e-09, + "tendency_std": 0.00025286378399999999 + }, + "q_50": { + "max": 8.3269296699999993e-06, + "mean": 2.6714215200000001e-06, + "min": 1.04556477e-07, + "std": 2.6213824599999998e-07, + "tendency_mean": -7.9652745200000002e-13, + "tendency_std": 3.6286434200000001e-08 + }, + "q_500": { + "max": 0.0103576845, + "mean": 0.0011593095400000001, + "min": -5.47191739e-05, + "std": 0.00126020895, + "tendency_mean": 2.1939599800000001e-09, + "tendency_std": 0.00049019319599999996 + }, + "q_600": { + "max": 0.016927827100000001, + "mean": 0.0021006952499999999, + "min": 9.9999937199999995e-09, + "std": 0.0020190143699999998, + "tendency_mean": 1.5357680800000001e-09, + "tendency_std": 0.00072442851800000004 + }, + "q_700": { + "max": 0.018530968599999999, + "mean": 0.0033263929400000002, + "min": 7.2579027199999996e-09, + "std": 0.0028303756000000002, + "tendency_mean": 3.4540313299999998e-10, + "tendency_std": 0.0009923471289999999 + }, + "q_850": { + "max": 0.023186840100000002, + "mean": 0.0063004540600000002, + "min": 9.9999937199999995e-09, + "std": 0.0042812035099999996, + "tendency_mean": 3.2802938e-09, + "tendency_std": 0.0013133172100000001 + }, + "q_925": { + "max": 0.027008269000000001, 
+ "mean": 0.0083513232999999996, + "min": 9.9999972699999998e-09, + "std": 0.00506488497, + "tendency_mean": 2.0915614000000002e-09, + "tendency_std": 0.00108039469 + }, + "sdor": { + "max": 880.14910899999995, + "mean": 20.725032599999999, + "min": 0, + "std": 61.710795099999999, + "tendency_mean": 0, + "tendency_std": 0 + }, + "sin_julian_day": { + "max": 0.99999940399999998, + "mean": 4.4853185800000001e-06, + "min": -0.99999940399999998, + "std": 0.707095264, + "tendency_mean": 1.4016969999999999e-07, + "tendency_std": 0.0030470993200000002 + }, + "sin_latitude": { + "max": 0.99992197800000004, + "mean": -2.4220300099999999e-08, + "min": -0.99992197800000004, + "std": 0.56005214800000003, + "tendency_mean": 0, + "tendency_std": 0 + }, + "sin_local_time": { + "max": 1, + "mean": 0, + "min": -1, + "std": 0.70710678100000002, + "tendency_mean": -5.0879977500000001e-14, + "tendency_std": 1 + }, + "sin_longitude": { + "max": 1, + "mean": -1.56107403e-09, + "min": -1, + "std": 0.70710678100000002, + "tendency_mean": 0, + "tendency_std": 0 + }, + "skt": { + "max": 345.493652, + "mean": 288.84737999999999, + "min": 188.41850299999999, + "std": 16.39059, + "tendency_mean": 1.6691701699999999e-05, + "tendency_std": 4.7307539800000002 + }, + "slor": { + "max": 0.145110607, + "mean": 0.00342588576, + "min": 9.9999975599999999e-05, + "std": 0.010003946, + "tendency_mean": 0, + "tendency_std": 0 + }, + "sp": { + "max": 106965.758, + "mean": 98562.907399999996, + "min": 49538.949200000003, + "std": 6802.3130899999996, + "tendency_mean": 0.00067656068099999998, + "tendency_std": 237.874619 + }, + "t_100": { + "max": 248.84127799999999, + "mean": 204.00815, + "min": 171.877274, + "std": 11.4263171, + "tendency_mean": -1.0714072699999999e-05, + "tendency_std": 1.1596995299999999 + }, + "t_1000": { + "max": 326.94561800000002, + "mean": 289.00044000000003, + "min": 213.323837, + "std": 13.409414699999999, + "tendency_mean": 1.2282676600000001e-05, + "tendency_std": 1.8524012299999999 + }, + "t_150": { + "max": 242.687973, + "mean": 211.04308800000001, + "min": 177.81835899999999, + "std": 7.3770069400000002, + "tendency_mean": -3.51486749e-06, + "tendency_std": 1.1011531000000001 + }, + "t_200": { + "max": 243.51191700000001, + "mean": 218.624965, + "min": 183.67652899999999, + "std": 5.20307297, + "tendency_mean": -2.8315422599999999e-06, + "tendency_std": 1.42252252 + }, + "t_250": { + "max": 248.82647700000001, + "mean": 226.07788199999999, + "min": 191.708237, + "std": 7.2639925999999999, + "tendency_mean": 1.71752648e-06, + "tendency_std": 1.336902 + }, + "t_300": { + "max": 258.807434, + "mean": 233.90135699999999, + "min": 197.768753, + "std": 9.3689952200000004, + "tendency_mean": 5.7797776000000002e-06, + "tendency_std": 1.12096427 + }, + "t_400": { + "max": 271.49237099999999, + "mean": 248.17968500000001, + "min": 208.88604699999999, + "std": 10.8657374, + "tendency_mean": 7.47303617e-06, + "tendency_std": 1.2142894099999999 + }, + "t_50": { + "max": 260.852417, + "mean": 211.07011600000001, + "min": 172.23249799999999, + "std": 7.4442996900000002, + "tendency_mean": -3.6452379700000001e-05, + "tendency_std": 1.4399984699999999 + }, + "t_500": { + "max": 285.38769500000001, + "mean": 259.12071400000002, + "min": 212.267517, + "std": 10.9151352, + "tendency_mean": 6.8351654300000004e-06, + "tendency_std": 1.26787362 + }, + "t_600": { + "max": 293.53213499999998, + "mean": 267.41478999999998, + "min": 199.46310399999999, + "std": 10.889709, + "tendency_mean": 7.3781313699999998e-06, + 
"tendency_std": 1.2424684800000001 + }, + "t_700": { + "max": 304.73049900000001, + "mean": 274.241964, + "min": 204.11537200000001, + "std": 11.5140449, + "tendency_mean": 8.1631809800000001e-06, + "tendency_std": 1.22117762 + }, + "t_850": { + "max": 315.52230800000001, + "mean": 281.74937599999998, + "min": 212.13665800000001, + "std": 12.353778200000001, + "tendency_mean": 5.1368297299999997e-06, + "tendency_std": 1.49259728 + }, + "t_925": { + "max": 321.81384300000002, + "mean": 284.769679, + "min": 215.69049100000001, + "std": 12.7668321, + "tendency_mean": 8.2182800599999998e-06, + "tendency_std": 1.6686812 + }, + "tcw": { + "max": 143.30860899999999, + "mean": 25.486033899999999, + "min": 0.035640176400000001, + "std": 17.159997000000001, + "tendency_mean": 1.0556105100000001e-05, + "tendency_std": 3.1778567199999999 + }, + "tp": { + "max": 0.35000610399999998, + "mean": 0.00074776161700000003, + "min": 0, + "std": 0.00235457201, + "tendency_mean": 9.6176185799999992e-10, + "tendency_std": 0.0023170118799999999 + }, + "u_100": { + "max": 85.496749899999998, + "mean": 9.3193560699999995, + "min": -65.809677100000002, + "std": 14.2119444, + "tendency_mean": -3.5253989800000001e-05, + "tendency_std": 2.8335277799999998 + }, + "u_1000": { + "max": 37.048248299999997, + "mean": -0.61008966499999995, + "min": -41.381591800000002, + "std": 6.0101256799999998, + "tendency_mean": 7.2268145100000003e-07, + "tendency_std": 2.24795232 + }, + "u_150": { + "max": 113.082031, + "mean": 13.9091506, + "min": -60.778442400000003, + "std": 17.4459859, + "tendency_mean": -4.81434987e-05, + "tendency_std": 3.3645549199999998 + }, + "u_200": { + "max": 122.95272799999999, + "mean": 14.7393783, + "min": -72.572769199999996, + "std": 18.834703900000001, + "tendency_mean": -3.8783616299999998e-05, + "tendency_std": 4.23715712 + }, + "u_250": { + "max": 126.730515, + "mean": 13.593199200000001, + "min": -81.648635900000002, + "std": 18.533656000000001, + "tendency_mean": -3.2851894799999999e-05, + "tendency_std": 5.0634266300000004 + }, + "u_300": { + "max": 119.99739099999999, + "mean": 11.789622, + "min": -86.054977399999999, + "std": 17.224090499999999, + "tendency_mean": -3.0894423900000001e-05, + "tendency_std": 5.25654164 + }, + "u_400": { + "max": 106.65005499999999, + "mean": 8.4439893300000008, + "min": -76.537963899999994, + "std": 14.2377588, + "tendency_mean": -1.9805466199999999e-05, + "tendency_std": 4.5396122200000004 + }, + "u_50": { + "max": 99.538619999999995, + "mean": 2.7806357899999998, + "min": -75.373519900000005, + "std": 14.6445933, + "tendency_mean": 1.45402531e-05, + "tendency_std": 2.74218728 + }, + "u_500": { + "max": 86.8177795, + "mean": 6.0036341000000002, + "min": -62.2264099, + "std": 11.9041239, + "tendency_mean": -7.6996532600000004e-06, + "tendency_std": 3.6565220900000002 + }, + "u_600": { + "max": 76.234512300000006, + "mean": 4.2019487499999997, + "min": -59.040878300000003, + "std": 10.2519572, + "tendency_mean": -4.6854763199999999e-06, + "tendency_std": 3.09672817 + }, + "u_700": { + "max": 63.8558655, + "mean": 2.7296153699999999, + "min": -71.268402100000003, + "std": 9.0770363799999991, + "tendency_mean": 1.19672273e-07, + "tendency_std": 2.81688334 + }, + "u_850": { + "max": 60.362197899999998, + "mean": 0.71486512700000004, + "min": -79.971542400000004, + "std": 8.0340003899999992, + "tendency_mean": 2.07348946e-06, + "tendency_std": 2.8357668 + }, + "u_925": { + "max": 60.445129399999999, + "mean": -0.101380186, + "min": -68.002044699999999, + "std": 
7.7387406700000003, + "tendency_mean": 3.0556464500000002e-06, + "tendency_std": 2.9392139300000002 + }, + "v_100": { + "max": 61.626205400000003, + "mean": 0.016006228800000001, + "min": -67.324035600000002, + "std": 6.9467798199999997, + "tendency_mean": 2.9616266600000001e-08, + "tendency_std": 3.1601596299999999 + }, + "v_1000": { + "max": 62.592391999999997, + "mean": 0.22459607100000001, + "min": -42.414810199999998, + "std": 4.9855278199999997, + "tendency_mean": -2.3241973700000001e-07, + "tendency_std": 2.5584081099999998 + }, + "v_150": { + "max": 72.531448400000002, + "mean": -0.075751164400000001, + "min": -84.884567300000001, + "std": 9.6003282599999995, + "tendency_mean": 1.0566955e-06, + "tendency_std": 3.7697194399999998 + }, + "v_200": { + "max": 95.696899400000007, + "mean": -0.092314347699999993, + "min": -102.374771, + "std": 11.7983303, + "tendency_mean": 6.3236980200000001e-06, + "tendency_std": 4.9431834099999996 + }, + "v_250": { + "max": 104.253052, + "mean": -0.059248690600000001, + "min": -98.748901399999994, + "std": 12.7098636, + "tendency_mean": 5.8053394700000004e-06, + "tendency_std": 6.1059364199999999 + }, + "v_300": { + "max": 101.47081, + "mean": -0.031292018599999999, + "min": -96.398162799999994, + "std": 12.240216, + "tendency_mean": 3.24771399e-06, + "tendency_std": 6.4001740299999996 + }, + "v_400": { + "max": 91.755523699999998, + "mean": -0.019855796500000002, + "min": -89.656494100000003, + "std": 10.0717113, + "tendency_mean": 5.8667019300000001e-07, + "tendency_std": 5.5127653900000002 + }, + "v_50": { + "max": 82.010269199999996, + "mean": 0.00023885593999999999, + "min": -81.3757935, + "std": 5.5123934700000001, + "tendency_mean": 2.61542959e-06, + "tendency_std": 3.10712264 + }, + "v_500": { + "max": 79.519653300000002, + "mean": -0.040261799899999999, + "min": -76.046905499999994, + "std": 8.20669586, + "tendency_mean": -5.7593263099999996e-07, + "tendency_std": 4.3935030299999998 + }, + "v_600": { + "max": 66.614547700000003, + "mean": -0.057403836399999998, + "min": -67.269103999999999, + "std": 6.9772236300000001, + "tendency_mean": -5.5273130199999998e-06, + "tendency_std": 3.6402039799999999 + }, + "v_700": { + "max": 64.905517599999996, + "mean": -0.025966472800000001, + "min": -63.332061799999998, + "std": 6.1565984499999997, + "tendency_mean": -4.6005276500000004e-06, + "tendency_std": 3.22130334 + }, + "v_850": { + "max": 65.529876700000003, + "mean": 0.097475898899999996, + "min": -63.884445200000002, + "std": 5.6527801799999997, + "tendency_mean": -1.7567187400000001e-06, + "tendency_std": 3.16803977 + }, + "v_925": { + "max": 62.425567600000001, + "mean": 0.215186084, + "min": -62.892944300000003, + "std": 5.9619714200000002, + "tendency_mean": -8.7353725400000002e-07, + "tendency_std": 3.3472448199999998 + }, + "w_100": { + "max": 1.6652498200000001, + "mean": -3.2572908400000003e-05, + "min": -2.28956413, + "std": 0.023963990500000001, + "tendency_mean": 1.2068942700000001e-10, + "tendency_std": 0.030985877299999999 + }, + "w_1000": { + "max": 12.6513729, + "mean": 0.0086188029199999999, + "min": -6.4900474499999996, + "std": 0.087084636300000004, + "tendency_mean": -9.7016804399999998e-08, + "tendency_std": 0.069366357500000003 + }, + "w_150": { + "max": 1.93596268, + "mean": -9.6270556999999995e-05, + "min": -5.5607128100000001, + "std": 0.051294005099999998, + "tendency_mean": 1.5617037699999999e-09, + "tendency_std": 0.063144888900000001 + }, + "w_200": { + "max": 2.6908731499999998, + "mean": -0.00020129398799999999, + 
"min": -8.0072946500000004, + "std": 0.0828576756, + "tendency_mean": 2.80638622e-09, + "tendency_std": 0.096649566500000006 + }, + "w_250": { + "max": 3.6825256300000002, + "mean": -0.00029963110900000002, + "min": -9.8379640599999991, + "std": 0.112645184, + "tendency_mean": 3.76853905e-09, + "tendency_std": 0.12578481999999999 + }, + "w_300": { + "max": 3.82894421, + "mean": -0.00034286509299999999, + "min": -10.925571400000001, + "std": 0.13941400900000001, + "tendency_mean": 3.8389775599999998e-09, + "tendency_std": 0.155715626 + }, + "w_400": { + "max": 5.2380542800000001, + "mean": -0.00035032225199999999, + "min": -12.1863241, + "std": 0.17609791799999999, + "tendency_mean": 1.43899677e-09, + "tendency_std": 0.201741484 + }, + "w_50": { + "max": 1.1088218700000001, + "mean": -1.6003727000000001e-05, + "min": -1.1580133399999999, + "std": 0.0111556009, + "tendency_mean": -1.13018981e-10, + "tendency_std": 0.015363412599999999 + }, + "w_500": { + "max": 6.82522392, + "mean": -0.00038001823400000002, + "min": -11.794881800000001, + "std": 0.18895094300000001, + "tendency_mean": 2.31637265e-09, + "tendency_std": 0.218826936 + }, + "w_600": { + "max": 6.7095575299999997, + "mean": -0.00025880796599999999, + "min": -10.954759599999999, + "std": 0.191547566, + "tendency_mean": 3.4261358800000002e-09, + "tendency_std": 0.225074095 + }, + "w_700": { + "max": 7.4590539900000001, + "mean": 0.00046537171600000003, + "min": -11.1764212, + "std": 0.193147395, + "tendency_mean": -2.24547834e-09, + "tendency_std": 0.22758341400000001 + }, + "w_850": { + "max": 5.9530038799999998, + "mean": 0.0041729967700000004, + "min": -7.03121376, + "std": 0.176087048, + "tendency_mean": -2.58354341e-08, + "tendency_std": 0.20092968999999999 + }, + "w_925": { + "max": 7.2545261400000003, + "mean": 0.0070406749100000001, + "min": -4.1729602799999999, + "std": 0.13920302900000001, + "tendency_mean": -6.0303742200000002e-08, + "tendency_std": 0.147015124 + }, + "z": { + "max": 54284.230499999998, + "mean": 2277.2726200000002, + "min": -986.00781199999994, + "std": 6171.4011499999997, + "tendency_mean": 0, + "tendency_std": 0 + }, + "z_100": { + "max": 166822.31200000001, + "mean": 159892.212, + "min": 138013.81200000001, + "std": 4102.48992, + "tendency_mean": 0.0021960818799999999, + "tendency_std": 192.59950599999999 + }, + "z_1000": { + "max": 5217.0615200000002, + "mean": 944.66131499999995, + "min": -7261.1445299999996, + "std": 872.02628300000003, + "tendency_mean": 0.00043820007799999999, + "tendency_std": 202.467895 + }, + "z_150": { + "max": 142851.93799999999, + "mean": 135806.647, + "min": 115293.5, + "std": 4705.8004099999998, + "tendency_mean": 0.00300449376, + "tendency_std": 211.414379 + }, + "z_200": { + "max": 124576.43799999999, + "mean": 118073.33199999999, + "min": 98691.3125, + "std": 4819.4816000000001, + "tendency_mean": 0.0032620830999999999, + "tendency_std": 248.94324700000001 + }, + "z_250": { + "max": 109547.25, + "mean": 103842.624, + "min": 85460.9375, + "std": 4586.4886800000004, + "tendency_mean": 0.00332488946, + "tendency_std": 285.79996899999998 + }, + "z_300": { + "max": 96904.625, + "mean": 91810.945800000001, + "min": 74455.625, + "std": 4195.9761099999996, + "tendency_mean": 0.0031230778299999999, + "tendency_std": 295.860432 + }, + "z_400": { + "max": 76165.093800000002, + "mean": 71908.180999999997, + "min": 56699.410199999998, + "std": 3385.25828, + "tendency_mean": 0.0025265539300000001, + "tendency_std": 263.736377 + }, + "z_50": { + "max": 208846.25, + "mean": 
200988.90700000001, + "min": 175447.31200000001, + "std": 3847.5536400000001, + "tendency_mean": -0.0024367219000000001, + "tendency_std": 213.22452000000001 + }, + "z_500": { + "max": 59314.652300000002, + "mean": 55648.3413, + "min": 42419.781199999998, + "std": 2713.36157, + "tendency_mean": 0.0020690968599999999, + "tendency_std": 222.459475 + }, + "z_600": { + "max": 45082.652300000002, + "mean": 41853.2647, + "min": 30229.148399999998, + "std": 2172.0050299999998, + "tendency_mean": 0.0016786896100000001, + "tendency_std": 194.51780500000001 + }, + "z_700": { + "max": 32771.390599999999, + "mean": 29846.086599999999, + "min": 19483.578099999999, + "std": 1719.6775700000001, + "tendency_mean": 0.0013134248800000001, + "tendency_std": 180.75443100000001 + }, + "z_850": { + "max": 17360.472699999998, + "mean": 14290.801799999999, + "min": 5280.3789100000004, + "std": 1178.4855700000001, + "tendency_mean": 0.00085035381299999999, + "tendency_std": 181.42816300000001 + }, + "z_925": { + "max": 10891.266600000001, + "mean": 7385.6299499999996, + "min": -1122.8596199999999, + "std": 988.62301500000001, + "tendency_mean": 0.00066737378599999997, + "tendency_std": 190.652074 + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/fy3.json b/stac/jsons/fy3.json new file mode 100644 index 000000000..eda21f349 --- /dev/null +++ b/stac/jsons/fy3.json @@ -0,0 +1,229 @@ +{ + "assets": { + "MICROWAVE_FCDR_V1.1-20200512/FY3/*/*.nc": { + "description": "Observation dataset", + "href": "MICROWAVE_FCDR_V1.1-20200512/FY3/*/*.nc", + "inodes": "10", + "locations": [ + "hpc2020" + ], + "roles": [ + "data" + ], + "size": "0.5 TB", + "title": "MICROWAVE_FCDR_V1.1-20200512/FY3/*/*.nc", + "type": "application/vnd+netcdf" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.MWHS FY-3", + "properties": { + "description": "The FY-3 data come from the MWHS microwave radiometer on China's Fengyun satellites. 
Data is available for three FY-3 satellites, FY-3A, FY-3B and FY-3C.", + "end_datetime": "2018-12-31T23:58:08", + "fixed_timesteps": "False", + "frequency": "NA", + "keywords": [ + "atmosphere", + "observation", + "polar-orbiter", + "satellite" + ], + "name": "MWHS FY-3", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + }, + { + "name": "EUMETSAT", + "roles": [ + "provider" + ], + "url": "https://eumetsat.int" + } + ], + "start_datetime": "2008-07-01T00:19:46", + "title": "MWHS FY-3", + "unique_id": "10", + "variables": { + "btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "coldnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "data_quality_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "instrtemp": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_issue_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_scanline_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "scnlin": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_common_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_independent_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_structured_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "warmnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/fy3a.json b/stac/jsons/fy3a.json new file mode 100644 index 000000000..da7e09957 --- /dev/null +++ b/stac/jsons/fy3a.json @@ -0,0 +1,237 @@ +{ + "assets": { + "MICROWAVE_FCDR_V1.1-20200512/FY3A/*/*.nc": { + "description": "Observation dataset", + "href": "MICROWAVE_FCDR_V1.1-20200512/FY3A/*/*.nc", + "inodes": 
"31039", + "locations": [ + "hpc2020" + ], + "roles": [ + "data" + ], + "size": "664.9 GB", + "title": "MICROWAVE_FCDR_V1.1-20200512/FY3A/*/*.nc", + "type": "application/vnd+netcdf" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.FY-3A, MWHS", + "properties": { + "description": "The data from the MWHS microwave radiometer onboard FY-3A, a Fengyun satellite. Data is available for three FY-3 satellites, FY-3A, FY-3B and FY-3C.", + "end_datetime": "2014-05-05T00:33:45", + "fixed_timesteps": "False", + "frequency": "NA", + "keywords": [ + "atmosphere", + "observation", + "polar-orbiter", + "satellite" + ], + "name": "FY-3A, MWHS", + "processing_level": "1C", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + }, + { + "name": "CMA", + "roles": [ + "provider" + ], + "url": "https://www.cma.gov.cn/" + }, + { + "name": "EUMETSAT", + "roles": [ + "processor" + ], + "url": "https://eumetsat.int" + } + ], + "start_datetime": "2008-07-01T00:19:46", + "title": "FY-3A, MWHS", + "unique_id": "10", + "variables": { + "btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "coldnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "data_quality_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "instrtemp": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_issue_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_scanline_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "scnlin": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_common_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_independent_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_structured_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "warmnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + 
"std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/fy3b.json b/stac/jsons/fy3b.json new file mode 100644 index 000000000..a2ac66081 --- /dev/null +++ b/stac/jsons/fy3b.json @@ -0,0 +1,237 @@ +{ + "assets": { + "MICROWAVE_FCDR_V1.1-20200512/FY3C/*/*.nc": { + "description": "Observation dataset", + "href": "MICROWAVE_FCDR_V1.1-20200512/FY3C/*/*.nc", + "inodes": "44204", + "locations": [ + "hpc2020" + ], + "roles": [ + "data" + ], + "size": "961.4 GB", + "title": "MICROWAVE_FCDR_V1.1-20200512/FY3C/*/*.nc", + "type": "application/vnd+netcdf" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.FY-3B, MWHS", + "properties": { + "description": "The data from the MWHS microwave radiometer onboard FY-3B, a Fengyun satellite. Data is available for three FY-3 satellites, FY-3A, FY-3B and FY-3C.", + "end_datetime": "2018-12-31T22:29:55", + "fixed_timesteps": "False", + "frequency": "NA", + "keywords": [ + "atmosphere", + "observation", + "polar-orbiter", + "satellite" + ], + "name": "FY-3B, MWHS", + "processing_level": "1C", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + }, + { + "name": "CMA", + "roles": [ + "provider" + ], + "url": "https://www.cma.gov.cn/" + }, + { + "name": "EUMETSAT", + "roles": [ + "processor" + ], + "url": "https://eumetsat.int" + } + ], + "start_datetime": "2010-11-18T22:23:16", + "title": "FY-3B, MWHS", + "unique_id": "11", + "variables": { + "btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "coldnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "data_quality_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "instrtemp": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_issue_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_scanline_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "scnlin": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_zenith_angle": { + "max": "NA", + "mean": 
"NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_common_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_independent_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_structured_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "warmnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/fy3c.json b/stac/jsons/fy3c.json new file mode 100644 index 000000000..f22a6a773 --- /dev/null +++ b/stac/jsons/fy3c.json @@ -0,0 +1,237 @@ +{ + "assets": { + "MICROWAVE_FCDR_V1.1-20200512/FY3C/*/*.nc": { + "description": "Observation dataset", + "href": "MICROWAVE_FCDR_V1.1-20200512/FY3C/*/*.nc", + "inodes": "27805", + "locations": [ + "hpc2020" + ], + "roles": [ + "data" + ], + "size": "1.51 TB", + "title": "MICROWAVE_FCDR_V1.1-20200512/FY3C/*/*.nc", + "type": "application/vnd+netcdf" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.FY-3C, MWHS", + "properties": { + "description": "The data from the MWHS microwave radiometer onboard FY-3C, a Fengyun satellite. 
Data is available for three FY-3 satellites, FY-3A, FY-3B and FY-3C.", + "end_datetime": "2018-12-31T19:58:58", + "fixed_timesteps": "False", + "frequency": "NA", + "keywords": [ + "atmosphere", + "observation", + "polar-orbiter", + "satellite" + ], + "name": "FY-3C, MWHS", + "processing_level": "1C", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + }, + { + "name": "CMA", + "roles": [ + "provider" + ], + "url": "https://www.cma.gov.cn/" + }, + { + "name": "EUMETSAT", + "roles": [ + "processor" + ], + "url": "https://eumetsat.int" + } + ], + "start_datetime": "2013-09-30T21:05:22", + "title": "FY-3C, MWHS", + "unique_id": "12", + "variables": { + "btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "coldnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "data_quality_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "instrtemp": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_issue_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_scanline_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "scnlin": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_common_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_independent_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_structured_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "warmnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/ifs-fesom_atmos.json b/stac/jsons/ifs-fesom_atmos.json new file mode 100644 index 000000000..d4dbb6005 --- /dev/null +++ b/stac/jsons/ifs-fesom_atmos.json @@ -0,0 +1,1398 @@ +{ + 
"assets": { + "atmos_all/{year}.zarr": { + "description": "Atmospheric component of the AWI model using OpenIFS (CY43R3 version). Configured on a regular grid of 400 (longitude) × 192 (latitude) points", + "href": "atmos_all/{year}.zarr", + "inodes": "614,657", + "locations": [ + "juwels_booster" + ], + "roles": [ + "data" + ], + "size": "12 TB", + "title": "atmos_all/{year}.zarr", + "type": "application/zarr" + } + }, + "bbox": [ + 0, + -90, + 360, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + 0, + -90 + ], + [ + 0, + 90 + ], + [ + 360, + 90 + ], + [ + 360, + -90 + ], + [ + 0, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.AWI IFS-FESOM (ATMOS)", + "properties": { + "description": "The atmosphere element component of the AWI IFS-FESOM coupled climate dataset that integrates atmospheric simulations from the IFS model with ocean and sea ice dynamics from the FESOM framework, capturing detailed Earth system interactions at high resolution.", + "end_datetime": "2209-12-31T23:59:59", + "fixed_timesteps": "True", + "frequency": "6h", + "keywords": [ + "coupled model", + "climate simulation", + "atmosphere" + ], + "name": "AWI IFS-FESOM (ATMOS)", + "processing_level": "model output", + "providers": [ + { + "name": "AWI", + "roles": [ + "provider" + ], + "url": "https://www.awi.de" + } + ], + "start_datetime": "2000-01-01T00:00:00", + "title": "AWI IFS-FESOM Coupled Climate Model (Atmos)", + "unique_id": "14", + "variables": { + "10u": { + "max": 38.770099999999999, + "mean": 0.0015128931263461709, + "min": -40.214889999999997, + "std": 5.6931643486022949, + "tendency_mean": 6.7996442699999994e-05, + "tendency_std": 2.04880912 + }, + "10v": { + "max": 38.609580000000001, + "mean": 0.16890795528888702, + "min": -40.738483000000002, + "std": 4.7152938842773438, + "tendency_mean": 0.00020083202300000001, + "tendency_std": 2.2772152800000001 + }, + "2d": { + "max": 313.69130000000001, + "mean": 273.94232177734375, + "min": 143.93333000000001, + "std": 20.876657485961914, + "tendency_mean": -0.00028996596599999998, + "tendency_std": 1.77107824 + }, + "2t": { + "max": 330.63412, + "mean": 278.69680786132812, + "min": 188.48482999999999, + "std": 21.427907943725586, + "tendency_mean": 0.00333103804, + "tendency_std": 2.6192202099999999 + }, + "ci": { + "max": 1.464299, + "mean": 0.10867653042078018, + "min": -0.44484984999999999, + "std": 0.29084774851799011, + "tendency_mean": 1.5705922699999999e-08, + "tendency_std": 0.0030642274200000002 + }, + "cp": { + "max": 0.068286459999999993, + "mean": 0.00031672287150286138, + "min": -0.015498905, + "std": 0.0011661256430670619, + "tendency_mean": 2.99882965e-08, + "tendency_std": 0.00118098068 + }, + "lsp": { + "max": 0.072854730000000006, + "mean": 0.00026623933808878064, + "min": -0.00588887, + "std": 0.00083501078188419342, + "tendency_mean": -3.5865412499999998e-08, + "tendency_std": 0.000698917234 + }, + "msl": { + "max": 108770.53999999999, + "mean": 100885.84375, + "min": 89390.630000000005, + "std": 1470.149169921875, + "tendency_mean": 0.051531909600000002, + "tendency_std": 332.80675600000001 + }, + "q_100": { + "max": 4.455992e-06, + "mean": 3.6850308333669091e-06, + "min": 1.9187053000000001e-06, + "std": 2.5300025185970298e-07, + "tendency_mean": 5.9926924599999999e-12, + "tendency_std": 8.3501391800000004e-08 + }, + "q_1000": { + "max": 4.0245777000000004e-06, + "mean": 2.7587032036535675e-06, + "min": 1.9208283000000001e-06, + "std": 4.2893992713288753e-07, + "tendency_mean": -1.0293545099999999e-12, + 
"tendency_std": 4.1009848699999998e-08 + }, + "q_10000": { + "max": 2.6267575000000001e-05, + "mean": 2.1044027107564034e-06, + "min": -3.2454303999999998e-07, + "std": 4.3417983874860511e-07, + "tendency_mean": 7.0000934699999999e-13, + "tendency_std": 1.3039522399999999e-07 + }, + "q_100000": { + "max": 0.034769632000000002, + "mean": 0.0070860134437680244, + "min": -0.0032257328000000001, + "std": 0.0059550674632191658, + "tendency_mean": -2.5943259699999999e-07, + "tendency_std": 0.00075283313699999995 + }, + "q_15000": { + "max": 9.3536490000000001e-05, + "mean": 4.9563427637622226e-06, + "min": -7.5110623999999999e-06, + "std": 4.3518234633665998e-06, + "tendency_mean": 1.3869492100000001e-11, + "tendency_std": 1.4358868000000001e-06 + }, + "q_2000": { + "max": 3.9768949999999996e-06, + "mean": 2.5137062493740814e-06, + "min": 2.5308586000000002e-07, + "std": 3.7307140132725181e-07, + "tendency_mean": -1.33723992e-12, + "tendency_std": 3.5826353700000001e-08 + }, + "q_20000": { + "max": 0.00045164419999999997, + "mean": 2.0305025827838108e-05, + "min": -3.9266934999999997e-05, + "std": 2.611067975522019e-05, + "tendency_mean": -5.7903482300000001e-10, + "tendency_std": 9.5957054900000005e-06 + }, + "q_25000": { + "max": 0.0013586742999999999, + "mean": 6.1260267102625221e-05, + "min": -0.00012335853999999999, + "std": 8.4744198829866946e-05, + "tendency_mean": -3.2497763900000002e-09, + "tendency_std": 3.2650922100000001e-05 + }, + "q_3000": { + "max": 4.1117733000000003e-06, + "mean": 2.3476166006730637e-06, + "min": 2.3665773e-07, + "std": 2.9992926897648431e-07, + "tendency_mean": 1.2413264799999999e-12, + "tendency_std": 3.6039289200000002e-08 + }, + "q_30000": { + "max": 0.0027995353999999998, + "mean": 0.00013691304775420576, + "min": -0.00023944113000000001, + "std": 0.00019360071746632457, + "tendency_mean": -7.8543394099999992e-09, + "tendency_std": 7.15903528e-05 + }, + "q_40000": { + "max": 0.0060766796, + "mean": 0.00042273945291526616, + "min": -0.00072672329999999997, + "std": 0.00057812005979940295, + "tendency_mean": -1.30448688e-08, + "tendency_std": 0.00019422875899999999 + }, + "q_500": { + "max": 4.1173450000000001e-06, + "mean": 3.0263486223702785e-06, + "min": 2.0499270000000002e-06, + "std": 4.395521386868495e-07, + "tendency_mean": -7.5976691199999995e-12, + "tendency_std": 4.8554633100000001e-08 + }, + "q_5000": { + "max": 3.9249039999999996e-06, + "mean": 2.1257467324176105e-06, + "min": 9.4898944000000004e-08, + "std": 2.6434832989252754e-07, + "tendency_mean": -6.2791842e-14, + "tendency_std": 3.2336867599999998e-08 + }, + "q_50000": { + "max": 0.011848074, + "mean": 0.00091623439220711589, + "min": -0.0010373775999999999, + "std": 0.0011611981317400932, + "tendency_mean": -5.9187633700000003e-09, + "tendency_std": 0.00036913559399999999 + }, + "q_60000": { + "max": 0.028947311999999999, + "mean": 0.001613425207324326, + "min": -0.0021193704999999999, + "std": 0.0018132260302081704, + "tendency_mean": 3.6323769700000003e-08, + "tendency_std": 0.00054478495199999997 + }, + "q_7000": { + "max": 1.7458366999999998e-05, + "mean": 2.0306606529629789e-06, + "min": -1.9565950000000001e-07, + "std": 2.7038581151828112e-07, + "tendency_mean": -4.8256528899999999e-13, + "tendency_std": 3.9712913800000002e-08 + }, + "q_70000": { + "max": 0.030375967, + "mean": 0.0024882657453417778, + "min": -0.0031778869999999999, + "std": 0.0025368400383740664, + "tendency_mean": 8.9899038100000004e-08, + "tendency_std": 0.00073710588799999997 + }, + "q_85000": { + "max": 
0.030566513999999999, + "mean": 0.0046473778784275055, + "min": -0.0031701056000000001, + "std": 0.0041436250321567059, + "tendency_mean": 2.15168976e-07, + "tendency_std": 0.00092950930499999999 + }, + "q_92500": { + "max": 0.030592319, + "mean": 0.0060128928162157536, + "min": -0.0031701056000000001, + "std": 0.0050333035178482533, + "tendency_mean": -4.37241875e-08, + "tendency_std": 0.000803809095 + }, + "r_100": { + "max": 0.91808559999999995, + "mean": 0.00053122692042961717, + "min": -0.00076792099999999997, + "std": 0.0038774770218878984, + "tendency_mean": 7.1644367799999998e-08, + "tendency_std": 0.00030822399199999999 + }, + "r_1000": { + "max": 67.079123999999993, + "mean": 0.41817143559455872, + "min": -1.4799066000000001, + "std": 1.3862596750259399, + "tendency_mean": -3.33110089e-05, + "tendency_std": 0.115702544 + }, + "r_10000": { + "max": 191.97601, + "mean": 25.178134918212891, + "min": -33.406531999999999, + "std": 32.578296661376953, + "tendency_mean": -0.00018937522400000001, + "tendency_std": 7.21208312 + }, + "r_100000": { + "max": 161.42363, + "mean": 78.049949645996094, + "min": -53.231029999999997, + "std": 18.818065643310547, + "tendency_mean": -0.0084466849100000001, + "tendency_std": 9.6543355799999997 + }, + "r_15000": { + "max": 187.68016, + "mean": 26.590721130371094, + "min": -42.770125999999998, + "std": 31.784322738647461, + "tendency_mean": -0.00039255384200000002, + "tendency_std": 9.3689856900000006 + }, + "r_2000": { + "max": 169.28546, + "mean": 2.6305429935455322, + "min": -0.99167346999999995, + "std": 10.726360321044922, + "tendency_mean": -0.000174388986, + "tendency_std": 0.491520191 + }, + "r_20000": { + "max": 179.46992, + "mean": 37.627315521240234, + "min": -36.665990000000001, + "std": 34.387592315673828, + "tendency_mean": -0.00043435868899999999, + "tendency_std": 14.969338199999999 + }, + "r_25000": { + "max": 183.61960999999999, + "mean": 50.212940216064453, + "min": -26.557971999999999, + "std": 33.665397644042969, + "tendency_mean": -0.00076720194800000002, + "tendency_std": 18.204826400000002 + }, + "r_3000": { + "max": 176.46902, + "mean": 4.3379673957824707, + "min": -1.5745564999999999, + "std": 15.370347023010254, + "tendency_mean": -0.000105533425, + "tendency_std": 0.76601423300000004 + }, + "r_30000": { + "max": 198.21472, + "mean": 55.493244171142578, + "min": -27.472764999999999, + "std": 33.446044921875, + "tendency_mean": -0.00071141248900000002, + "tendency_std": 19.337004 + }, + "r_40000": { + "max": 178.09285, + "mean": 53.947608947753906, + "min": -31.716781999999998, + "std": 33.798023223876953, + "tendency_mean": -0.000437428437, + "tendency_std": 19.787085099999999 + }, + "r_500": { + "max": 13.270657, + "mean": 0.040791686624288559, + "min": -0.17825795999999999, + "std": 0.11761236190795898, + "tendency_mean": -3.2061427799999999e-06, + "tendency_std": 0.015941028600000001 + }, + "r_5000": { + "max": 179.96686, + "mean": 6.3809704780578613, + "min": -2.6138271999999998, + "std": 15.265466690063477, + "tendency_mean": -0.00014460552300000001, + "tendency_std": 1.29973296 + }, + "r_50000": { + "max": 162.42506, + "mean": 51.635749816894531, + "min": -35.416355000000003, + "std": 33.10162353515625, + "tendency_mean": -0.00018079915700000001, + "tendency_std": 18.9854789 + }, + "r_60000": { + "max": 170.21552, + "mean": 52.182693481445312, + "min": -31.286251, + "std": 31.940793991088867, + "tendency_mean": 0.00026778502000000001, + "tendency_std": 17.717674599999999 + }, + "r_7000": { + "max": 194.51105000000001, + 
"mean": 13.596026420593262, + "min": -11.312215, + "std": 18.89381217956543, + "tendency_mean": -0.000472313239, + "tendency_std": 4.6278365700000004 + }, + "r_70000": { + "max": 168.89702, + "mean": 55.073024749755859, + "min": -34.056683, + "std": 30.729440689086914, + "tendency_mean": 0.00071104067500000004, + "tendency_std": 16.2510613 + }, + "r_85000": { + "max": 164.81138999999999, + "mean": 69.010093688964844, + "min": -35.849890000000002, + "std": 26.313985824584961, + "tendency_mean": 3.3712717499999998e-05, + "tendency_std": 12.7996748 + }, + "r_92500": { + "max": 167.2123, + "mean": 77.303390502929688, + "min": -32.99315, + "std": 21.75848388671875, + "tendency_mean": -0.0046081436200000004, + "tendency_std": 9.7973455600000001 + }, + "sst": { + "max": 317.23689999999999, + "mean": 282.61029052734375, + "min": 263.05362000000002, + "std": 11.575693130493164, + "tendency_mean": -1.8916722200000001e-05, + "tendency_std": 0.071295636800000006 + }, + "t_100": { + "max": 337.55914000000001, + "mean": 264.4266357421875, + "min": 194.98403999999999, + "std": 10.358405113220215, + "tendency_mean": 0.00029311531700000002, + "tendency_std": 3.7274986499999998 + }, + "t_1000": { + "max": 299.07825000000003, + "mean": 227.13165283203125, + "min": 181.50676000000001, + "std": 11.142080307006836, + "tendency_mean": 0.000114100015, + "tendency_std": 1.6366096699999999 + }, + "t_10000": { + "max": 255.10864000000001, + "mean": 207.46293640136719, + "min": 175.46312, + "std": 12.376836776733398, + "tendency_mean": 4.5226755299999999e-06, + "tendency_std": 0.88801247800000005 + }, + "t_100000": { + "max": 330.43340000000001, + "mean": 281.59408569335938, + "min": 209.53673000000001, + "std": 17.003585815429688, + "tendency_mean": 0.0019137341500000001, + "tendency_std": 1.78357112 + }, + "t_15000": { + "max": 244.59236000000001, + "mean": 212.2685546875, + "min": 174.20729, + "std": 8.4092464447021484, + "tendency_mean": 5.4277879200000003e-05, + "tendency_std": 1.0928867600000001 + }, + "t_2000": { + "max": 277.08294999999998, + "mean": 219.34602355957031, + "min": 172.37871999999999, + "std": 10.396921157836914, + "tendency_mean": -3.0316403200000001e-05, + "tendency_std": 1.2655355500000001 + }, + "t_20000": { + "max": 244.12702999999999, + "mean": 216.33879089355469, + "min": 181.76021, + "std": 7.3432989120483398, + "tendency_mean": 8.2803421100000007e-06, + "tendency_std": 1.49683856 + }, + "t_25000": { + "max": 248.77850000000001, + "mean": 221.35006713867188, + "min": 188.24637000000001, + "std": 9.7444295883178711, + "tendency_mean": -7.6266462099999997e-06, + "tendency_std": 1.2650956600000001 + }, + "t_3000": { + "max": 271.34480000000002, + "mean": 215.20797729492188, + "min": 171.84059999999999, + "std": 9.8411855697631836, + "tendency_mean": -4.1345897099999997e-05, + "tendency_std": 1.1377641599999999 + }, + "t_30000": { + "max": 256.09888000000001, + "mean": 228.26901245117188, + "min": 194.88297, + "std": 11.87940502166748, + "tendency_mean": -1.9448251000000001e-05, + "tendency_std": 1.07941062 + }, + "t_40000": { + "max": 269.76137999999997, + "mean": 242.13131713867188, + "min": 205.68857, + "std": 13.275673866271973, + "tendency_mean": -1.8538546899999999e-05, + "tendency_std": 1.28533681 + }, + "t_500": { + "max": 321.45254999999997, + "mean": 239.08250427246094, + "min": 187.43496999999999, + "std": 11.874174118041992, + "tendency_mean": 0.00010276903999999999, + "tendency_std": 2.32605101 + }, + "t_5000": { + "max": 265.68290000000002, + "mean": 210.68716430664062, + 
"min": 173.24173999999999, + "std": 10.002999305725098, + "tendency_mean": -7.8118406799999997e-06, + "tendency_std": 1.0561164599999999 + }, + "t_50000": { + "max": 284.49257999999998, + "mean": 253.16494750976562, + "min": 204.76635999999999, + "std": 13.280254364013672, + "tendency_mean": -1.20824439e-05, + "tendency_std": 1.31440936 + }, + "t_60000": { + "max": 295.51859999999999, + "mean": 261.53656005859375, + "min": 199.28086999999999, + "std": 13.351896286010742, + "tendency_mean": 2.8123591200000002e-05, + "tendency_std": 1.26096422 + }, + "t_7000": { + "max": 265.05079999999998, + "mean": 207.837158203125, + "min": 167.00592, + "std": 11.767593383789062, + "tendency_mean": -9.0926202999999995e-06, + "tendency_std": 1.0752354500000001 + }, + "t_70000": { + "max": 305.18176, + "mean": 267.95596313476562, + "min": 203.57578000000001, + "std": 14.41689395904541, + "tendency_mean": 7.7838616600000004e-05, + "tendency_std": 1.2521926999999999 + }, + "t_85000": { + "max": 317.69085999999999, + "mean": 275.17654418945312, + "min": 211.83623, + "std": 15.198408126831055, + "tendency_mean": 0.000462075543, + "tendency_std": 1.4762626400000001 + }, + "t_92500": { + "max": 324.74954000000002, + "mean": 278.05239868164062, + "min": 212.61693, + "std": 15.770951271057129, + "tendency_mean": 0.00134967154, + "tendency_std": 1.64942268 + }, + "tcc": { + "max": 4.3051899999999996, + "mean": 0.70543080568313599, + "min": -3.2750026999999999, + "std": 0.36660861968994141, + "tendency_mean": -3.3161204899999999e-06, + "tendency_std": 0.25954248299999999 + }, + "tsr": { + "max": 27487506, + "mean": 4194826, + "min": -557693.80000000005, + "std": 5783975, + "tendency_mean": -294.90284200000002, + "tendency_std": 7477880.1200000001 + }, + "tsrc": { + "max": 26153708, + "mean": 5060187, + "min": -39063.144999999997, + "std": 6760642, + "tendency_mean": -844.09644400000002, + "tendency_std": 8755826.8900000006 + }, + "u_100": { + "max": 223.96637999999999, + "mean": 8.0323400497436523, + "min": -184.13701, + "std": 33.989223480224609, + "tendency_mean": -0.00238567897, + "tendency_std": 8.3532698100000005 + }, + "u_1000": { + "max": 168.16025999999999, + "mean": 6.2553310394287109, + "min": -127.53636, + "std": 23.427761077880859, + "tendency_mean": 7.5227586700000001e-05, + "tendency_std": 3.6366616899999999 + }, + "u_10000": { + "max": 97.758949999999999, + "mean": 11.479822158813477, + "min": -66.969695999999999, + "std": 14.68474292755127, + "tendency_mean": 0.00058867434799999999, + "tendency_std": 2.3954573699999999 + }, + "u_100000": { + "max": 40.155436999999999, + "mean": 0.0020402644295245409, + "min": -39.497549999999997, + "std": 6.2907986640930176, + "tendency_mean": 0.000106989361, + "tendency_std": 2.2097482799999999 + }, + "u_15000": { + "max": 117.52265, + "mean": 14.545326232910156, + "min": -60.658965999999999, + "std": 17.239391326904297, + "tendency_mean": 0.00043525944600000002, + "tendency_std": 2.9618022700000002 + }, + "u_2000": { + "max": 138.81279000000001, + "mean": 5.3576936721801758, + "min": -108.87735000000001, + "std": 19.961837768554688, + "tendency_mean": -8.4829472400000001e-05, + "tendency_std": 2.8972327199999999 + }, + "u_20000": { + "max": 136.11878999999999, + "mean": 14.832322120666504, + "min": -78.850814999999997, + "std": 18.749900817871094, + "tendency_mean": 0.00026604778300000001, + "tendency_std": 4.0951867599999998 + }, + "u_25000": { + "max": 132.63382999999999, + "mean": 13.575922012329102, + "min": -85.081810000000004, + "std": 18.649103164672852, + 
"tendency_mean": 0.00034685229699999998, + "tendency_std": 5.0701336100000001 + }, + "u_3000": { + "max": 121.987144, + "mean": 5.2773394584655762, + "min": -89.188469999999995, + "std": 17.796215057373047, + "tendency_mean": 0.00013574831799999999, + "tendency_std": 2.6027043700000001 + }, + "u_30000": { + "max": 126.30293, + "mean": 11.855074882507324, + "min": -83.334075999999996, + "std": 17.416984558105469, + "tendency_mean": 0.00044307034199999999, + "tendency_std": 5.2546666999999996 + }, + "u_40000": { + "max": 109.809, + "mean": 8.7855014801025391, + "min": -79.164565999999994, + "std": 14.362478256225586, + "tendency_mean": 0.00049068681700000004, + "tendency_std": 4.3568994999999999 + }, + "u_500": { + "max": 182.0907, + "mean": 6.3308963775634766, + "min": -157.51915, + "std": 27.163213729858398, + "tendency_mean": 0.0025141850600000001, + "tendency_std": 4.88743821 + }, + "u_5000": { + "max": 100.84689, + "mean": 6.2774558067321777, + "min": -70.115629999999996, + "std": 15.222061157226562, + "tendency_mean": -0.00031699407699999998, + "tendency_std": 2.3834423999999999 + }, + "u_50000": { + "max": 95.695983999999996, + "mean": 6.5217714309692383, + "min": -67.683689999999999, + "std": 11.949355125427246, + "tendency_mean": 0.00048080821899999999, + "tendency_std": 3.41474418 + }, + "u_60000": { + "max": 78.668940000000006, + "mean": 4.8109569549560547, + "min": -60.847479999999997, + "std": 10.282099723815918, + "tendency_mean": 0.00050834917999999999, + "tendency_std": 2.8545778500000001 + }, + "u_7000": { + "max": 93.92313, + "mean": 7.9226927757263184, + "min": -56.715522999999997, + "std": 14.276371002197266, + "tendency_mean": 0.000192673089, + "tendency_std": 2.33201832 + }, + "u_70000": { + "max": 67.542140000000003, + "mean": 3.4032742977142334, + "min": -62.723464999999997, + "std": 9.1440048217773438, + "tendency_mean": 0.00041424865099999998, + "tendency_std": 2.62225975 + }, + "u_85000": { + "max": 67.380780000000001, + "mean": 1.4681116342544556, + "min": -69.614716000000001, + "std": 8.3893146514892578, + "tendency_mean": 0.00056378781700000004, + "tendency_std": 2.7580882999999998 + }, + "u_92500": { + "max": 59.099589999999999, + "mean": 0.66271692514419556, + "min": -58.333472999999998, + "std": 8.2235603332519531, + "tendency_mean": 0.00049054267199999998, + "tendency_std": 2.9271307000000002 + }, + "v_100": { + "max": 193.99387999999999, + "mean": -0.09301469475030899, + "min": -190.46520000000001, + "std": 14.136162757873535, + "tendency_mean": 0.0043959974700000003, + "tendency_std": 9.3223918999999995 + }, + "v_1000": { + "max": 137.59280000000001, + "mean": -0.0056085358373820782, + "min": -140.97489999999999, + "std": 10.609649658203125, + "tendency_mean": 0.000157996531, + "tendency_std": 3.97109376 + }, + "v_10000": { + "max": 68.071129999999997, + "mean": 0.0032623808365315199, + "min": -67.309524999999994, + "std": 7.7878108024597168, + "tendency_mean": -9.8385837000000004e-05, + "tendency_std": 2.7389793400000002 + }, + "v_100000": { + "max": 38.972403999999997, + "mean": 0.17362920939922333, + "min": -41.477730000000001, + "std": 5.238987922668457, + "tendency_mean": 0.00025443271699999997, + "tendency_std": 2.4894197500000002 + }, + "v_15000": { + "max": 92.86712, + "mean": -0.034894410520792007, + "min": -89.464164999999994, + "std": 10.123950958251953, + "tendency_mean": -0.000112178783, + "tendency_std": 3.4866348700000001 + }, + "v_2000": { + "max": 115.729485, + "mean": 5.7199886214220896e-06, + "min": -120.96432, + "std": 
8.9707822799682617, + "tendency_mean": -0.0010111787599999999, + "tendency_std": 3.2023117800000001 + }, + "v_20000": { + "max": 116.39874, + "mean": -0.014768774621188641, + "min": -109.69841, + "std": 12.595921516418457, + "tendency_mean": -0.000169981918, + "tendency_std": 4.9910051299999996 + }, + "v_25000": { + "max": 118.32786, + "mean": -0.0071123773232102394, + "min": -117.30996, + "std": 13.707006454467773, + "tendency_mean": -0.00012936635599999999, + "tendency_std": 6.2316363399999997 + }, + "v_3000": { + "max": 103.92847399999999, + "mean": 0.0050326506607234478, + "min": -105.105446, + "std": 7.985318660736084, + "tendency_mean": -0.00090113629599999998, + "tendency_std": 2.8794884500000002 + }, + "v_30000": { + "max": 108.13975499999999, + "mean": -0.0053521147929131985, + "min": -113.10843, + "std": 13.222649574279785, + "tendency_mean": -0.00014268412299999999, + "tendency_std": 6.4496358899999997 + }, + "v_40000": { + "max": 97.313379999999995, + "mean": -0.019304752349853516, + "min": -89.990070000000003, + "std": 10.84925365447998, + "tendency_mean": -0.000127236665, + "tendency_std": 5.36130891 + }, + "v_500": { + "max": 178.90351999999999, + "mean": -0.0096246562898159027, + "min": -172.73587000000001, + "std": 12.076651573181152, + "tendency_mean": 0.000876248798, + "tendency_std": 5.40261631 + }, + "v_5000": { + "max": 88.963769999999997, + "mean": 0.0090648690238595009, + "min": -78.382199999999997, + "std": 7.0326056480407715, + "tendency_mean": -0.00043104236099999999, + "tendency_std": 2.6659460899999998 + }, + "v_50000": { + "max": 81.334854000000007, + "mean": -0.017805138602852821, + "min": -76.530074999999997, + "std": 8.8256978988647461, + "tendency_mean": -2.2528751100000001e-05, + "tendency_std": 4.1944645500000002 + }, + "v_60000": { + "max": 73.983504999999994, + "mean": -0.025567071512341499, + "min": -61.743110000000001, + "std": 7.4922566413879395, + "tendency_mean": -3.9267812300000001e-06, + "tendency_std": 3.4386224300000001 + }, + "v_7000": { + "max": 78.728165000000004, + "mean": 0.013180352747440338, + "min": -65.792829999999995, + "std": 6.9217257499694824, + "tendency_mean": -0.00016701883099999999, + "tendency_std": 2.6325254 + }, + "v_70000": { + "max": 67.903464999999997, + "mean": 0.012522663921117783, + "min": -64.998930000000001, + "std": 6.625279426574707, + "tendency_mean": -3.1911216899999999e-06, + "tendency_std": 3.05781795 + }, + "v_85000": { + "max": 70.870670000000004, + "mean": 0.1304398775100708, + "min": -63.374625999999999, + "std": 6.1808795928955078, + "tendency_mean": 0.00010768909099999999, + "tendency_std": 3.14060359 + }, + "v_92500": { + "max": 61.81277, + "mean": 0.1929241269826889, + "min": -59.021275000000003, + "std": 6.4264640808105469, + "tendency_mean": 0.00023123223400000001, + "tendency_std": 3.36883723 + }, + "vo_100": { + "max": 0.00068711130000000001, + "mean": -1.2818295545002911e-06, + "min": -0.00069142879999999996, + "std": 2.9744123821728863e-05, + "tendency_mean": -5.2095875400000002e-10, + "tendency_std": 1.6012069099999999e-05 + }, + "vo_1000": { + "max": 0.00036430012999999999, + "mean": -1.5500266954404651e-06, + "min": -0.0003241241, + "std": 2.2719475964549929e-05, + "tendency_mean": -2.9680787499999998e-10, + "tendency_std": 7.0969675400000004e-06 + }, + "vo_10000": { + "max": 0.00025113301999999998, + "mean": -7.9686861909067375e-07, + "min": -0.00031327379999999998, + "std": 1.9502518625813536e-05, + "tendency_mean": 9.6777205100000006e-11, + "tendency_std": 9.0001709999999997e-06 + }, + 
"vo_100000": { + "max": 0.00041510473000000003, + "mean": 1.5382498474991735e-07, + "min": -0.00037913326999999998, + "std": 2.44894308707444e-05, + "tendency_mean": -7.2830683900000006e-11, + "tendency_std": 1.78130313e-05 + }, + "vo_15000": { + "max": 0.00038612371999999999, + "mean": -5.7420862731305533e-07, + "min": -0.00036060939999999999, + "std": 2.8239079256309196e-05, + "tendency_mean": 8.1390601699999998e-11, + "tendency_std": 1.4393016599999999e-05 + }, + "vo_2000": { + "max": 0.00031946469, + "mean": -1.4645942201241269e-06, + "min": -0.00032030220000000002, + "std": 1.9893635908374563e-05, + "tendency_mean": -9.5778469900000003e-11, + "tendency_std": 5.92017734e-06 + }, + "vo_20000": { + "max": 0.00049568620000000002, + "mean": -4.1039447751245461e-07, + "min": -0.00054162790000000002, + "std": 3.9628928789170459e-05, + "tendency_mean": 7.7190223300000003e-11, + "tendency_std": 2.4516104600000001e-05 + }, + "vo_25000": { + "max": 0.00061463849999999999, + "mean": -3.0235239023568283e-07, + "min": -0.00067104614999999995, + "std": 4.9871730880113319e-05, + "tendency_mean": 9.3509506699999998e-11, + "tendency_std": 3.6925112599999999e-05 + }, + "vo_3000": { + "max": 0.00029074712000000001, + "mean": -1.3526175735023571e-06, + "min": -0.00027541278000000003, + "std": 1.8020313291344792e-05, + "tendency_mean": -3.8902032700000003e-11, + "tendency_std": 5.6253689099999996e-06 + }, + "vo_30000": { + "max": 0.00067318279999999998, + "mean": -2.3988690145415603e-07, + "min": -0.00068743114000000005, + "std": 5.3281848522601649e-05, + "tendency_mean": 5.4807331100000003e-11, + "tendency_std": 4.3232560999999997e-05 + }, + "vo_40000": { + "max": 0.00056641565999999998, + "mean": -1.5469552749891591e-07, + "min": -0.00052390219999999999, + "std": 4.5852288167225197e-05, + "tendency_mean": -1.2146871299999999e-12, + "tendency_std": 3.8418720899999998e-05 + }, + "vo_500": { + "max": 0.00060223090000000002, + "mean": -1.530355007162143e-06, + "min": -0.00056122569999999996, + "std": 2.5089355403906666e-05, + "tendency_mean": -8.1191502399999994e-11, + "tendency_std": 9.4038909100000001e-06 + }, + "vo_5000": { + "max": 0.00023848890000000001, + "mean": -1.1486886251077522e-06, + "min": -0.00023743724, + "std": 1.6184549167519435e-05, + "tendency_mean": 8.0234656599999997e-11, + "tendency_std": 6.0436085300000003e-06 + }, + "vo_50000": { + "max": 0.00044997702999999999, + "mean": -7.5666427790110902e-08, + "min": -0.00040656205999999999, + "std": 3.6192916013533249e-05, + "tendency_mean": -2.5703955000000001e-11, + "tendency_std": 2.9164995100000001e-05 + }, + "vo_60000": { + "max": 0.00043306226, + "mean": 3.9789906480791615e-08, + "min": -0.00039931165999999998, + "std": 3.0602495826315135e-05, + "tendency_mean": -5.9017397799999997e-11, + "tendency_std": 2.3251367199999999e-05 + }, + "vo_7000": { + "max": 0.00024372012000000001, + "mean": -9.8537043413671199e-07, + "min": -0.00028238420000000001, + "std": 1.6364561815862544e-05, + "tendency_mean": 9.9229753299999999e-11, + "tendency_std": 7.0888442400000002e-06 + }, + "vo_70000": { + "max": 0.00054302246999999996, + "mean": 2.033883959029481e-07, + "min": -0.00045018728000000002, + "std": 2.8758262487826869e-05, + "tendency_mean": -4.7320113300000002e-11, + "tendency_std": 2.0957142899999999e-05 + }, + "vo_85000": { + "max": 0.00068120175000000002, + "mean": 1.5975916767274612e-07, + "min": -0.00058197870000000005, + "std": 3.0399089155253023e-05, + "tendency_mean": -6.6168548900000002e-11, + "tendency_std": 2.3048769900000002e-05 + }, + 
"vo_92500": { + "max": 0.00062943785000000002, + "mean": 1.4541262771672336e-07, + "min": -0.00056680816000000004, + "std": 3.194465534761548e-05, + "tendency_mean": -4.28992138e-11, + "tendency_std": 2.52892026e-05 + }, + "w_100": { + "max": 0.014582065, + "mean": -3.5805519473797176e-06, + "min": -0.013002554, + "std": 0.00037181892548687756, + "tendency_mean": -3.2759907400000002e-07, + "tendency_std": 0.00056762367599999997 + }, + "w_1000": { + "max": 0.10181906, + "mean": -4.3542597268242389e-05, + "min": -0.11003586999999999, + "std": 0.0019001071341335773, + "tendency_mean": -1.55614034e-06, + "tendency_std": 0.0025326601099999999 + }, + "w_10000": { + "max": 0.57757999999999998, + "mean": -1.1339187039993703e-05, + "min": -1.0679354999999999, + "std": 0.015411750413477421, + "tendency_mean": -8.62978716e-06, + "tendency_std": 0.015728862699999999 + }, + "w_100000": { + "max": 3.4876290000000001, + "mean": 0.01239265501499176, + "min": -2.3794217, + "std": 0.082953087985515594, + "tendency_mean": -6.4421831400000005e-05, + "tendency_std": 0.050804086999999998 + }, + "w_15000": { + "max": 1.2802703, + "mean": 2.1207799363764934e-05, + "min": -2.0269370000000002, + "std": 0.033592168241739273, + "tendency_mean": -1.1919855700000001e-05, + "tendency_std": 0.032319715800000003 + }, + "w_2000": { + "max": 0.17515035000000001, + "mean": -5.7000215747393668e-05, + "min": -0.19839412000000001, + "std": 0.003107759403064847, + "tendency_mean": -2.4495006399999998e-06, + "tendency_std": 0.0039705556599999997 + }, + "w_20000": { + "max": 1.7123986, + "mean": 1.0659641702659428e-05, + "min": -2.6305285, + "std": 0.058755502104759216, + "tendency_mean": -1.4919891599999999e-05, + "tendency_std": 0.052424933899999998 + }, + "w_25000": { + "max": 2.0645501999999998, + "mean": 4.9225109250983223e-05, + "min": -2.7588723000000002, + "std": 0.083717852830886841, + "tendency_mean": -1.7607649499999999e-05, + "tendency_std": 0.073165986599999996 + }, + "w_3000": { + "max": 0.25339993999999999, + "mean": -5.9799775044666603e-05, + "min": -0.26056573, + "std": 0.0042126188054680824, + "tendency_mean": -3.30867047e-06, + "tendency_std": 0.0052065766200000002 + }, + "w_30000": { + "max": 2.3855765, + "mean": 0.00015658812480978668, + "min": -2.6972523000000002, + "std": 0.10578497499227524, + "tendency_mean": -2.0140296100000002e-05, + "tendency_std": 0.096162315100000006 + }, + "w_40000": { + "max": 3.1767487999999999, + "mean": 0.00035662477603182197, + "min": -3.3970039999999999, + "std": 0.13639713823795319, + "tendency_mean": -2.5053872099999999e-05, + "tendency_std": 0.13294577699999999 + }, + "w_500": { + "max": 0.062111653000000003, + "mean": -2.7555350243346766e-05, + "min": -0.063782950000000005, + "std": 0.001208994654007256, + "tendency_mean": -1.0757152e-06, + "tendency_std": 0.00167653088 + }, + "w_5000": { + "max": 0.41270494000000002, + "mean": -5.3324438340496272e-05, + "min": -0.38408493999999999, + "std": 0.0064498595893383026, + "tendency_mean": -4.8737638300000001e-06, + "tendency_std": 0.0075656806899999998 + }, + "w_50000": { + "max": 4.3311529999999996, + "mean": 0.00043865299085155129, + "min": -4.2111052999999998, + "std": 0.14864742755889893, + "tendency_mean": -2.9813803299999999e-05, + "tendency_std": 0.14959335600000001 + }, + "w_60000": { + "max": 5.0073619999999996, + "mean": 0.00050252286018803716, + "min": -4.9540806000000002, + "std": 0.15072277188301086, + "tendency_mean": -3.4458252800000003e-05, + "tendency_std": 0.154034956 + }, + "w_7000": { + "max": 
0.50165919999999997, + "mean": -3.9379450754495338e-05, + "min": -0.45983859999999999, + "std": 0.009107847698032856, + "tendency_mean": -6.4451662800000002e-06, + "tendency_std": 0.010041423799999999 + }, + "w_70000": { + "max": 5.7401359999999997, + "mean": 0.0021713741589337587, + "min": -5.6364093000000004, + "std": 0.15029902756214142, + "tendency_mean": -3.9007608900000002e-05, + "tendency_std": 0.15133581500000001 + }, + "w_85000": { + "max": 4.5582310000000001, + "mean": 0.0086234547197818756, + "min": -4.080012, + "std": 0.14036460220813751, + "tendency_mean": -4.8075497800000002e-05, + "tendency_std": 0.13034441199999999 + }, + "w_92500": { + "max": 3.7909079000000001, + "mean": 0.011709229089319706, + "min": -2.8785672, + "std": 0.11522987484931946, + "tendency_mean": -5.5663601199999999e-05, + "tendency_std": 0.093993277900000005 + }, + "z_100": { + "max": 496567.78000000003, + "mean": 462808.75, + "min": 406875.53000000003, + "std": 14963.015625, + "tendency_mean": 0.15845503799999999, + "tendency_std": 1497.5199299999999 + }, + "z_1000": { + "max": 316533.53000000003, + "mean": 299615.15625, + "min": 259841.79999999999, + "std": 9563.99609375, + "tendency_mean": 0.094407119299999995, + "tendency_std": 564.23139900000001 + }, + "z_10000": { + "max": 167507.81, + "mean": 157299.203125, + "min": 136547.81, + "std": 5964.66552734375, + "tendency_mean": 0.100855414, + "tendency_std": 273.95979299999999 + }, + "z_100000": { + "max": 6053.4160000000002, + "mean": 687.061767578125, + "min": -8928.598, + "std": 1191.3887939453125, + "tendency_mean": 0.040237724599999997, + "tendency_std": 271.02085 + }, + "z_15000": { + "max": 143236.56, + "mean": 132898.375, + "min": 113948.19, + "std": 6272.06640625, + "tendency_mean": 0.096168242500000001, + "tendency_std": 291.120499 + }, + "z_2000": { + "max": 268846, + "mean": 255225.375, + "min": 222145.62, + "std": 7879.7607421875, + "tendency_mean": 0.090435026000000002, + "tendency_std": 438.908117 + }, + "z_20000": { + "max": 124831.3, + "mean": 115204.234375, + "min": 97517.766000000003, + "std": 6189.51220703125, + "tendency_mean": 0.092951886100000006, + "tendency_std": 334.29344200000003 + }, + "z_25000": { + "max": 109869.08, + "mean": 101204.390625, + "min": 84506.266000000003, + "std": 5789.27880859375, + "tendency_mean": 0.093228084200000005, + "tendency_std": 370.35356899999999 + }, + "z_3000": { + "max": 241449.14000000001, + "mean": 229939.734375, + "min": 200521.56, + "std": 6968.5791015625, + "tendency_mean": 0.094772491900000005, + "tendency_std": 379.93283000000002 + }, + "z_30000": { + "max": 97070.369999999995, + "mean": 89447.765625, + "min": 73567.945000000007, + "std": 5258.80810546875, + "tendency_mean": 0.093949289800000002, + "tendency_std": 373.786022 + }, + "z_40000": { + "max": 76398.335999999996, + "mean": 70032.015625, + "min": 55468.086000000003, + "std": 4243.91064453125, + "tendency_mean": 0.095784144900000007, + "tendency_std": 334.18693000000002 + }, + "z_500": { + "max": 366629.25, + "mean": 345898.21875, + "min": 300532.90000000002, + "std": 11225.1708984375, + "tendency_mean": 0.124999706, + "tendency_std": 747.18628999999999 + }, + "z_5000": { + "max": 208595.45000000001, + "mean": 198723.4375, + "min": 173627.42000000001, + "std": 6076.25, + "tendency_mean": 0.098779101600000002, + "tendency_std": 318.68890599999997 + }, + "z_50000": { + "max": 59503.042999999998, + "mean": 54159.7578125, + "min": 41297.199999999997, + "std": 3423.122802734375, + "tendency_mean": 0.096859975799999998, + "tendency_std": 
293.97826400000002 + }, + "z_60000": { + "max": 45258.120000000003, + "mean": 40675.6640625, + "min": 29268.950000000001, + "std": 2764.9619140625, + "tendency_mean": 0.096514179300000003, + "tendency_std": 269.48986400000001 + }, + "z_7000": { + "max": 188266.88, + "mean": 178509.984375, + "min": 155837.04999999999, + "std": 5809.5302734375, + "tendency_mean": 0.098716163699999998, + "tendency_std": 287.78437000000002 + }, + "z_70000": { + "max": 33276.464999999997, + "mean": 28935, + "min": 18127.866999999998, + "std": 2217.778564453125, + "tendency_mean": 0.094863328900000002, + "tendency_std": 258.32288899999998 + }, + "z_85000": { + "max": 18554.504000000001, + "mean": 13724.701171875, + "min": 3921.4450000000002, + "std": 1575.7747802734375, + "tendency_mean": 0.085662557400000006, + "tendency_std": 258.95755000000003 + }, + "z_92500": { + "max": 12096.938, + "mean": 6976.41845703125, + "min": -2718.5, + "std": 1344.78076171875, + "tendency_mean": 0.067373742700000003, + "tendency_std": 264.57703900000001 + } + } + },
+ "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +}
diff --git a/stac/jsons/ifs-fesom_ocean_elem.json b/stac/jsons/ifs-fesom_ocean_elem.json
new file mode 100644
index 000000000..354253b18
--- /dev/null
+++ b/stac/jsons/ifs-fesom_ocean_elem.json
@@ -0,0 +1,855 @@
+{ + "assets": { + "ocean_elem/ocean_elem_{year}.zarr": { + "description": "Ocean element component of the AWI model using OpenIFS (CY43R3 version). Configured on a regular grid of 400 (longitude) × 192 (latitude) points", + "href": "ocean_elem/ocean_elem_{year}.zarr", + "inodes": "154,452", + "locations": [ + "juwels_booster" + ], + "roles": [ + "data" + ], + "size": "4.4 TB", + "title": "ocean_elem/ocean_elem_{year}.zarr", + "type": "application/zarr" + } + },
+ "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + },
+ "id": "weathergen.atmo.AWI IFS-FESOM (OCEAN ELEM)", + "properties": { + "description": "The ocean element component of the AWI IFS-FESOM coupled climate dataset that integrates atmospheric simulations from the IFS model with ocean and sea ice dynamics from the FESOM framework, capturing detailed Earth system interactions at high resolution.", + "end_datetime": "2209-12-31T23:59:59", + "fixed_timesteps": "True", + "frequency": "1 day", + "keywords": [ + "coupled model", + "climate simulation", + "ocean", + "ocean element" + ], + "name": "AWI IFS-FESOM (OCEAN ELEM)", + "processing_level": "model output", + "providers": [ + { + "name": "AWI", + "roles": [ + "provider" + ], + "url": "https://www.awi.de" + } + ], + "start_datetime": "2000-01-01T00:00:00", + "title": "AWI IFS-FESOM Coupled Climate Model (Ocean element)", + "unique_id": "15", + "variables": {
+ "tx_sur": { + "max": 3.8045974, + "mean": 0.012413257732987404, + "min": -4.283493, + "std": 0.13546857237815857, + "tendency_mean": 1.2932041399999999e-06, + "tendency_std": 0.112256086 + }, + "ty_sur": { + "max": 3.2065999999999999, + "mean": 0.0014986582100391388, + "min": -4.0582279999999997, + "std": 0.11496457457542419, + "tendency_mean": 7.9141153799999998e-07, + "tendency_std": 0.109235074 + }, + "u_107": { + "max": 2.0444855999999998,
+ "mean": 0.011934586800634861, + "min": -1.8306454000000001, + "std": 0.10360421985387802, + "tendency_mean": 4.8979386000000002e-07, + "tendency_std": 0.022717426799999999 + }, + "u_1110": { + "max": 0.81059219999999998, + "mean": 0.0016205976717174053, + "min": -0.95739660000000004, + "std": 0.034241445362567902, + "tendency_mean": -2.1150730599999999e-08, + "tendency_std": 0.0088474533400000003 + }, + "u_125": { + "max": 1.7590083999999999, + "mean": 0.014339592307806015, + "min": -1.8069223000000001, + "std": 0.10032349824905396, + "tendency_mean": 4.5508925499999998e-07, + "tendency_std": 0.0210596797 + }, + "u_1255": { + "max": 0.76575899999999997, + "mean": 0.0016530966386198997, + "min": -0.96650016000000005, + "std": 0.03183373436331749, + "tendency_mean": -2.00954922e-08, + "tendency_std": 0.0084759490700000006 + }, + "u_1415": { + "max": 0.81878830000000002, + "mean": 0.0016257022507488728, + "min": -0.92615955999999999, + "std": 0.029296832159161568, + "tendency_mean": -2.3271694100000002e-08, + "tendency_std": 0.0080344807899999995 + }, + "u_147": { + "max": 1.6732571999999999, + "mean": 0.016497921198606491, + "min": -1.7432113, + "std": 0.10111156851053238, + "tendency_mean": 3.5582739199999999e-07, + "tendency_std": 0.0191869021 + }, + "u_15": { + "max": 3.3929497999999998, + "mean": -0.015702081844210625, + "min": -2.5844307, + "std": 0.17691950500011444, + "tendency_mean": -5.5823164000000001e-07, + "tendency_std": 0.056101258199999997 + }, + "u_1600": { + "max": 0.75972530000000005, + "mean": 0.0014280588366091251, + "min": -0.89592640000000001, + "std": 0.02722606249153614, + "tendency_mean": -1.4863738799999999e-08, + "tendency_std": 0.0075580021900000004 + }, + "u_175": { + "max": 1.6008948999999999, + "mean": 0.017170889303088188, + "min": -1.6728932999999999, + "std": 0.10410355776548386, + "tendency_mean": 2.1894224200000001e-07, + "tendency_std": 0.017166920400000001 + }, + "u_1810": { + "max": 0.70052970000000003, + "mean": 0.0012494398979470134, + "min": -1.0664256000000001, + "std": 0.025550022721290588, + "tendency_mean": 4.9229897600000004e-09, + "tendency_std": 0.00716947278 + }, + "u_2": { + "max": 3.7002554000000001, + "mean": -0.020640794187784195, + "min": -2.9025717000000002, + "std": 0.20200459659099579, + "tendency_mean": -2.22174631e-07, + "tendency_std": 0.071491434199999995 + }, + "u_2035": { + "max": 0.6821933, + "mean": 0.0011284278007224202, + "min": -1.5019529, + "std": 0.023759884759783745, + "tendency_mean": 1.12446451e-08, + "tendency_std": 0.0067820154599999996 + }, + "u_210": { + "max": 1.3891168, + "mean": 0.01494122575968504, + "min": -1.6616204000000001, + "std": 0.096715040504932404, + "tendency_mean": 1.28819354e-07, + "tendency_std": 0.015360237699999999 + }, + "u_2275": { + "max": 0.64605844000000001, + "mean": 0.00099690037313848734, + "min": -0.77180207000000001, + "std": 0.022116774693131447, + "tendency_mean": 9.7607744299999993e-09, + "tendency_std": 0.0064103881299999999 + }, + "u_25": { + "max": 3.2544903999999999, + "mean": -0.0096774566918611526, + "min": -2.4951612999999999, + "std": 0.15911765396595001, + "tendency_mean": -5.2939965799999999e-07, + "tendency_std": 0.046547206000000001 + }, + "u_2525": { + "max": 0.64016949999999995, + "mean": 0.00097234209533780813, + "min": -0.68484469999999997, + "std": 0.020654674619436264, + "tendency_mean": 1.9852388100000001e-08, + "tendency_std": 0.0061113282000000001 + }, + "u_255": { + "max": 1.32687, + "mean": 0.011007098481059074, + "min": -1.5269358, + "std": 
0.079445309937000275, + "tendency_mean": 1.36181718e-07, + "tendency_std": 0.0138918866 + }, + "u_2775": { + "max": 0.67766320000000002, + "mean": 0.00082349340664222836, + "min": -0.76975749999999998, + "std": 0.019162526354193687, + "tendency_mean": 3.1886918299999998e-08, + "tendency_std": 0.0058103875299999998 + }, + "u_3025": { + "max": 0.63887720000000003, + "mean": 0.00060537137323990464, + "min": -0.76509464000000005, + "std": 0.017585473135113716, + "tendency_mean": 4.4479180400000002e-08, + "tendency_std": 0.0054273055399999999 + }, + "u_310": { + "max": 1.3053319999999999, + "mean": 0.0071308780461549759, + "min": -1.4851416, + "std": 0.065274268388748169, + "tendency_mean": 1.49456377e-07, + "tendency_std": 0.012693951199999999 + }, + "u_3275": { + "max": 0.63741539999999997, + "mean": 0.00034477506414987147, + "min": -0.793736, + "std": 0.016074825078248978, + "tendency_mean": 4.7916887399999999e-08, + "tendency_std": 0.0050039001299999998 + }, + "u_35": { + "max": 3.0558964999999998, + "mean": -0.0045551913790404797, + "min": -2.4066795999999999, + "std": 0.14473904669284821, + "tendency_mean": -1.80900954e-07, + "tendency_std": 0.038570607399999998 + }, + "u_3525": { + "max": 0.63713025999999995, + "mean": 5.0320195441599935e-05, + "min": -0.93123626999999998, + "std": 0.015048585832118988, + "tendency_mean": 4.1506635000000002e-08, + "tendency_std": 0.0045939062799999998 + }, + "u_375": { + "max": 1.1505624999999999, + "mean": 0.0038376031443476677, + "min": -1.3985734000000001, + "std": 0.058040555566549301, + "tendency_mean": 1.33374894e-07, + "tendency_std": 0.011759933 + }, + "u_3775": { + "max": 0.63101779999999996, + "mean": -3.8458725612144917e-05, + "min": -0.91588247, + "std": 0.013628973625600338, + "tendency_mean": 3.3329090300000001e-08, + "tendency_std": 0.0041779013 + }, + "u_4025": { + "max": 0.63136119999999996, + "mean": 6.3934421632438898e-05, + "min": -0.81911456999999999, + "std": 0.011980430223047733, + "tendency_mean": 1.5557726200000001e-08, + "tendency_std": 0.00372892167 + }, + "u_4275": { + "max": 0.42479487999999999, + "mean": 2.4087945348583162e-05, + "min": -0.76556057, + "std": 0.010402435436844826, + "tendency_mean": 6.1731545700000002e-09, + "tendency_std": 0.0032794748800000001 + }, + "u_45": { + "max": 2.9591017000000002, + "mean": -0.00081446138210594654, + "min": -2.2729607000000001, + "std": 0.13438086211681366, + "tendency_mean": 6.0715849199999997e-08, + "tendency_std": 0.033585018500000001 + }, + "u_450": { + "max": 1.2942317999999999, + "mean": 0.0026369555853307247, + "min": -1.091459, + "std": 0.053686633706092834, + "tendency_mean": 1.3898609299999999e-07, + "tendency_std": 0.011075290999999999 + }, + "u_4525": { + "max": 0.57067466, + "mean": -5.3932162700220942e-06, + "min": -0.90366840000000004, + "std": 0.0085462331771850586, + "tendency_mean": 3.3315045000000001e-09, + "tendency_std": 0.0026813210600000002 + }, + "u_4775": { + "max": 0.46733794000000001, + "mean": 1.4672230463474989e-05, + "min": -0.64535520000000002, + "std": 0.0070096231065690517, + "tendency_mean": 1.9324997799999999e-09, + "tendency_std": 0.0021486314800000001 + }, + "u_5025": { + "max": 0.35270760000000001, + "mean": 5.7334632401762065e-06, + "min": -0.3397425, + "std": 0.0055397315882146358, + "tendency_mean": -2.9959612399999999e-10, + "tendency_std": 0.00167806686 + }, + "u_5275": { + "max": 0.22920768, + "mean": 5.1837669161614031e-06, + "min": -0.22456239, + "std": 0.0039489474147558212, + "tendency_mean": 9.0583631499999997e-10, + "tendency_std": 
0.00119492225 + }, + "u_535": { + "max": 1.1145213, + "mean": 0.0034559974446892738, + "min": -1.0825936, + "std": 0.04974823072552681, + "tendency_mean": 1.4846617999999999e-07, + "tendency_std": 0.010580653799999999 + }, + "u_55": { + "max": 2.7377775, + "mean": 0.0020940911490470171, + "min": -2.2171401999999998, + "std": 0.12675102055072784, + "tendency_mean": 1.8939531e-07, + "tendency_std": 0.0302729178 + }, + "u_5525": { + "max": 0.18695634999999999, + "mean": -1.9348926798556931e-06, + "min": -0.22596005, + "std": 0.002579973079264164, + "tendency_mean": 1.9799140200000002e-09, + "tendency_std": 0.00078468561699999995 + }, + "u_5825": { + "max": 0.16958144, + "mean": -9.5788172984612174e-06, + "min": -0.18618456, + "std": 0.0013529193820431828, + "tendency_mean": 4.15476784e-10, + "tendency_std": 0.00038154179700000001 + }, + "u_6125": { + "max": 0, + "mean": 0, + "min": 0, + "std": 0, + "tendency_mean": 0, + "tendency_std": 0 + }, + "u_630": { + "max": 1.0550592000000001, + "mean": 0.0034204740077257156, + "min": -1.0908420000000001, + "std": 0.046106941998004913, + "tendency_mean": 1.15512194e-07, + "tendency_std": 0.010204804 + }, + "u_65": { + "max": 2.5696015000000001, + "mean": 0.0044489167630672455, + "min": -2.2163409999999999, + "std": 0.12076697498559952, + "tendency_mean": 2.4642524999999999e-07, + "tendency_std": 0.028075954199999999 + }, + "u_7": { + "max": 3.6100919999999999, + "mean": -0.019474577158689499, + "min": -2.765968, + "std": 0.19344387948513031, + "tendency_mean": -3.9530957199999999e-07, + "tendency_std": 0.066001235800000002 + }, + "u_735": { + "max": 0.92577803000000003, + "mean": 0.0024481220170855522, + "min": -0.99649613999999997, + "std": 0.042936950922012329, + "tendency_mean": 6.1802719099999996e-08, + "tendency_std": 0.0098879662699999996 + }, + "u_75": { + "max": 2.3955218999999999, + "mean": 0.0064959796145558357, + "min": -2.083869, + "std": 0.11574187129735947, + "tendency_mean": 3.0404992500000002e-07, + "tendency_std": 0.026488247900000001 + }, + "u_85": { + "max": 2.3090484, + "mean": 0.0083420649170875549, + "min": -1.927494, + "std": 0.11137962341308594, + "tendency_mean": 3.6647178599999999e-07, + "tendency_std": 0.025237170900000001 + }, + "u_850": { + "max": 0.86133970000000004, + "mean": 0.002080702455714345, + "min": -0.82946604000000002, + "std": 0.039821598678827286, + "tendency_mean": 1.8974809599999998e-08, + "tendency_std": 0.0095722822899999994 + }, + "u_95": { + "max": 2.1052729999999999, + "mean": 0.010011048056185246, + "min": -1.8130457, + "std": 0.10760815441608429, + "tendency_mean": 4.3761478099999999e-07, + "tendency_std": 0.024190015200000001 + }, + "u_975": { + "max": 0.82047000000000003, + "mean": 0.0018841188866645098, + "min": -0.82140650000000004, + "std": 0.03689362108707428, + "tendency_mean": -7.6553928200000005e-09, + "tendency_std": 0.0092261044800000004 + }, + "v_107": { + "max": 1.8376049000000001, + "mean": 0.0021404412109404802, + "min": -1.8316836000000001, + "std": 0.079163737595081329, + "tendency_mean": -2.9336253499999999e-08, + "tendency_std": 0.024546719700000001 + }, + "v_1110": { + "max": 0.82129717000000002, + "mean": -0.00013379332085605711, + "min": -0.84790109999999996, + "std": 0.028232414275407791, + "tendency_mean": -6.3197496099999999e-09, + "tendency_std": 0.010040906299999999 + }, + "v_125": { + "max": 1.9014046, + "mean": 0.0026531077455729246, + "min": -1.6442711000000001, + "std": 0.075025148689746857, + "tendency_mean": 1.7843078999999998e-08, + "tendency_std": 0.022718663399999998 
+ }, + "v_1255": { + "max": 0.7633065, + "mean": -0.00019301949942018837, + "min": -0.79565620000000004, + "std": 0.02645212784409523, + "tendency_mean": -4.2338810199999999e-09, + "tendency_std": 0.0095254739799999991 + }, + "v_1415": { + "max": 0.81779312999999998, + "mean": -0.0001688509073574096, + "min": -0.87837549999999998, + "std": 0.024581205099821091, + "tendency_mean": 5.9470359699999998e-09, + "tendency_std": 0.0089307379199999991 + }, + "v_147": { + "max": 1.6932275999999999, + "mean": 0.0031408765353262424, + "min": -1.6593593, + "std": 0.069875970482826233, + "tendency_mean": 7.1817727800000004e-08, + "tendency_std": 0.0205752903 + }, + "v_15": { + "max": 2.4277329999999999, + "mean": -0.002354219788685441, + "min": -2.6242467999999999, + "std": 0.11449543386697769, + "tendency_mean": -2.2796324099999999e-07, + "tendency_std": 0.057375915700000002 + }, + "v_1600": { + "max": 0.92849389999999998, + "mean": -0.00022771942894905806, + "min": -0.75134769999999995, + "std": 0.022934954613447189, + "tendency_mean": 1.53592836e-08, + "tendency_std": 0.0083248609399999996 + }, + "v_175": { + "max": 1.539504, + "mean": 0.0033360728994011879, + "min": -1.7167543000000001, + "std": 0.064971014857292175, + "tendency_mean": 5.8776705299999998e-08, + "tendency_std": 0.018506046000000002 + }, + "v_1810": { + "max": 0.93508720000000001, + "mean": -0.00020466132264118642, + "min": -0.88186120000000001, + "std": 0.021731702610850334, + "tendency_mean": 1.3973203000000001e-08, + "tendency_std": 0.0078300365300000008 + }, + "v_2": { + "max": 2.7932899999999998, + "mean": -0.00071975425817072392, + "min": -3.0944414, + "std": 0.1263405829668045, + "tendency_mean": -1.0098529099999999e-06, + "tendency_std": 0.072502208900000004 + }, + "v_2035": { + "max": 0.84283319999999995, + "mean": -0.00014178291894495487, + "min": -0.70925534000000001, + "std": 0.020434178411960602, + "tendency_mean": 1.18299139e-08, + "tendency_std": 0.0073489357700000004 + }, + "v_210": { + "max": 1.3518203, + "mean": 0.0029038796201348305, + "min": -1.5812557, + "std": 0.059990733861923218, + "tendency_mean": 3.5743277799999997e-08, + "tendency_std": 0.016573435399999999 + }, + "v_2275": { + "max": 0.83102699999999996, + "mean": -8.7288739450741559e-05, + "min": -0.72645389999999999, + "std": 0.019267592579126358, + "tendency_mean": 1.26023282e-08, + "tendency_std": 0.0069227982299999999 + }, + "v_25": { + "max": 2.3388140000000002, + "mean": -0.0026954310014843941, + "min": -2.5240952999999999, + "std": 0.10603608936071396, + "tendency_mean": -2.1589991299999999e-10, + "tendency_std": 0.047911688199999997 + }, + "v_2525": { + "max": 1.0111228000000001, + "mean": -6.1195773014333099e-05, + "min": -0.71330139999999997, + "std": 0.018275270238518715, + "tendency_mean": 8.0174890599999993e-09, + "tendency_std": 0.00663333667 + }, + "v_255": { + "max": 1.2130425, + "mean": 0.0021690507419407368, + "min": -1.3457774, + "std": 0.055512402206659317, + "tendency_mean": 1.11473991e-08, + "tendency_std": 0.0151242241 + }, + "v_2775": { + "max": 0.81830263000000003, + "mean": -3.2843217923073098e-05, + "min": -0.72791850000000002, + "std": 0.017277603968977928, + "tendency_mean": -7.2884702699999999e-10, + "tendency_std": 0.00635052677 + }, + "v_3025": { + "max": 0.68300899999999998, + "mean": -1.8775799617287703e-05, + "min": -0.68622243000000005, + "std": 0.016195766627788544, + "tendency_mean": -8.5689098800000002e-09, + "tendency_std": 0.0060091873700000003 + }, + "v_310": { + "max": 1.1150717000000001, + "mean": 
0.0014845682308077812, + "min": -1.1744064999999999, + "std": 0.051570791751146317, + "tendency_mean": 8.8235542299999998e-09, + "tendency_std": 0.0140793494 + }, + "v_3275": { + "max": 0.59210620000000003, + "mean": -3.6100653233006597e-05, + "min": -0.74636113999999998, + "std": 0.01515091210603714, + "tendency_mean": -1.9493397999999999e-08, + "tendency_std": 0.0056540539199999998 + }, + "v_35": { + "max": 2.2350140000000001, + "mean": -0.002416547853499651, + "min": -2.3469882000000002, + "std": 0.098872482776641846, + "tendency_mean": -1.2608284100000001e-07, + "tendency_std": 0.040021484699999998 + }, + "v_3525": { + "max": 0.65758159999999999, + "mean": -4.8059635446406901e-05, + "min": -0.58753250000000001, + "std": 0.014132826589047909, + "tendency_mean": -1.61988606e-08, + "tendency_std": 0.0052769416500000003 + }, + "v_375": { + "max": 1.1306092999999999, + "mean": 0.00093481963267549872, + "min": -0.96204319999999999, + "std": 0.048018895089626312, + "tendency_mean": -1.0642224000000001e-08, + "tendency_std": 0.0133096091 + }, + "v_3775": { + "max": 0.72521679999999999, + "mean": -1.0421264960314147e-05, + "min": -0.49068519999999999, + "std": 0.012835639528930187, + "tendency_mean": -1.62305739e-08, + "tendency_std": 0.0048913932299999999 + }, + "v_4025": { + "max": 0.80357825999999999, + "mean": 4.1858340409817174e-05, + "min": -0.55123920000000004, + "std": 0.011547588743269444, + "tendency_mean": -1.57105212e-08, + "tendency_std": 0.00442795779 + }, + "v_4275": { + "max": 0.93994580000000005, + "mean": 6.1936756537761539e-05, + "min": -0.48222026000000001, + "std": 0.010262034833431244, + "tendency_mean": -1.7997699299999999e-08, + "tendency_std": 0.0039397866299999999 + }, + "v_45": { + "max": 2.2094296999999998, + "mean": -0.0015123166376724839, + "min": -2.2733680000000001, + "std": 0.093871288001537323, + "tendency_mean": -1.8845319300000001e-07, + "tendency_std": 0.035097312899999997 + }, + "v_450": { + "max": 0.88101890000000005, + "mean": 0.00074957509059458971, + "min": -0.9972512, + "std": 0.044479995965957642, + "tendency_mean": -8.8055767100000001e-09, + "tendency_std": 0.012638832399999999 + }, + "v_4525": { + "max": 1.0046124000000001, + "mean": 7.9739125794731081e-05, + "min": -0.39743578000000002, + "std": 0.0084750046953558922, + "tendency_mean": -1.28000349e-08, + "tendency_std": 0.0031751524300000001 + }, + "v_4775": { + "max": 0.71673774999999995, + "mean": 6.645732355536893e-05, + "min": -0.36283539999999997, + "std": 0.0068798735737800598, + "tendency_mean": -6.0686470999999996e-09, + "tendency_std": 0.0025030526000000002 + }, + "v_5025": { + "max": 0.29273336999999999, + "mean": 5.8640387578634545e-05, + "min": -0.28070276999999999, + "std": 0.0054594776593148708, + "tendency_mean": -3.2961852399999999e-09, + "tendency_std": 0.0019674522300000001 + }, + "v_5275": { + "max": 0.22689197999999999, + "mean": 4.246804746799171e-05, + "min": -0.24647521999999999, + "std": 0.0039657209999859333, + "tendency_mean": 2.1825117300000002e-09, + "tendency_std": 0.0013921611399999999 + }, + "v_535": { + "max": 0.96284144999999999, + "mean": 0.00076343590626493096, + "min": -1.1496198, + "std": 0.041126281023025513, + "tendency_mean": -1.8970783399999999e-08, + "tendency_std": 0.0120953951 + }, + "v_55": { + "max": 2.1489929999999999, + "mean": -0.00066041608806699514, + "min": -2.205546, + "std": 0.090504102408885956, + "tendency_mean": -1.80041054e-07, + "tendency_std": 0.031972588099999998 + }, + "v_5525": { + "max": 0.20424655, + "mean": 2.3591643184772693e-05, + 
"min": -0.22433320000000001, + "std": 0.0026946677826344967, + "tendency_mean": 7.2625644300000003e-10, + "tendency_std": 0.000885007257 + }, + "v_5825": { + "max": 0.19415867000000001, + "mean": 1.9365375010238495e-06, + "min": -0.21625385999999999, + "std": 0.0013288235059008002, + "tendency_mean": 9.0809900700000005e-11, + "tendency_std": 0.00038631242199999998 + }, + "v_6125": { + "max": 0, + "mean": 0, + "min": 0, + "std": 0, + "tendency_mean": 0, + "tendency_std": 0 + }, + "v_630": { + "max": 0.92862750000000005, + "mean": 0.00057606131304055452, + "min": -1.1447194000000001, + "std": 0.038085881620645523, + "tendency_mean": -1.7702204800000001e-08, + "tendency_std": 0.0116861764 + }, + "v_65": { + "max": 2.0616682000000002, + "mean": 6.3179919379763305e-05, + "min": -2.1183752999999999, + "std": 0.088054262101650238, + "tendency_mean": -1.3475283800000001e-07, + "tendency_std": 0.029924803999999999 + }, + "v_7": { + "max": 2.6042135000000002, + "mean": -0.0015933234244585037, + "min": -2.9850292, + "std": 0.12241507321596146, + "tendency_mean": -8.4900644900000004e-07, + "tendency_std": 0.066989295300000001 + }, + "v_735": { + "max": 0.87020576000000005, + "mean": 0.0002760784700512886, + "min": -1.0804818, + "std": 0.035354346036911011, + "tendency_mean": -8.48856017e-09, + "tendency_std": 0.0113517762 + }, + "v_75": { + "max": 1.9758667000000001, + "mean": 0.00066658819559961557, + "min": -2.0217320000000001, + "std": 0.085977077484130859, + "tendency_mean": -9.3360586300000003e-08, + "tendency_std": 0.028403967799999999 + }, + "v_85": { + "max": 1.8608863, + "mean": 0.0012129677925258875, + "min": -1.8974656999999999, + "std": 0.08399682492017746, + "tendency_mean": -6.7464901700000006e-08, + "tendency_std": 0.027142390200000002 + }, + "v_850": { + "max": 0.79505939999999997, + "mean": 0.00011936565715586767, + "min": -0.86573160000000005, + "std": 0.03266824409365654, + "tendency_mean": -1.3721627500000001e-08, + "tendency_std": 0.0109852609 + }, + "v_95": { + "max": 1.7960708999999999, + "mean": 0.001664375071413815, + "min": -1.8576273000000001, + "std": 0.081971645355224609, + "tendency_mean": -4.6356780600000002e-08, + "tendency_std": 0.025986764799999999 + }, + "v_975": { + "max": 0.75625430000000005, + "mean": -5.2408768169698305e-06, + "min": -0.84577453000000002, + "std": 0.030315089970827103, + "tendency_mean": -1.3982217600000001e-08, + "tendency_std": 0.010539646499999999 + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/ifs-fesom_ocean_node.json b/stac/jsons/ifs-fesom_ocean_node.json new file mode 100644 index 000000000..a09e4c831 --- /dev/null +++ b/stac/jsons/ifs-fesom_ocean_node.json @@ -0,0 +1,1319 @@ +{ + "assets": { + "ocean_elem/ocean_node_{year}.zarr": { + "description": "Ocean node component of the AWI model using OpenIFS (CY43R3 version). 
Configured on a non-regular grid with 126858 nodes", + "href": "ocean_elem/ocean_node_{year}.zarr", + "inodes": "154,452", + "locations": [ + "juwels_booster" + ], + "roles": [ + "data" + ], + "size": "3.4 TB", + "title": "ocean_elem/ocean_node_{year}.zarr", + "type": "application/zarr" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.AWI IFS-FESOM (OCEAN NODE)", + "properties": { + "description": "The ocean node component of the AWI IFS-FESOM coupled climate dataset that integrates atmospheric simulations from the IFS model with ocean and sea ice dynamics from the FESOM framework, capturing detailed Earth system interactions at high resolution.", + "end_datetime": "2209-12-31T23:59:59", + "fixed_timesteps": "True", + "frequency": "1 day", + "keywords": [ + "coupled model", + "climate simulation", + "ocean", + "ocean node" + ], + "name": "AWI IFS-FESOM (OCEAN NODE)", + "processing_level": "model output", + "providers": [ + { + "name": "AWI", + "roles": [ + "provider" + ], + "url": "https://www.awi.de" + } + ], + "start_datetime": "2000-01-01T00:00:00", + "title": "AWI IFS-FESOM Coupled Climate Model (Ocean node)", + "unique_id": "16", + "variables": { + "MLD2": { + "max": -6.0055455999999996, + "mean": -87.939437866210938, + "min": -5535.5, + "std": 331.00961303710938, + "tendency_mean": -0.00081146749300000005, + "tendency_std": 47.433826000000003 + }, + "a_ice": { + "max": 1, + "mean": 0.2023606151342392, + "min": 0, + "std": 0.37293565273284912, + "tendency_mean": 2.5448130500000001e-07, + "tendency_std": 0.0257178575 + }, + "evap": { + "max": 1.0181167000000001e-07, + "mean": -2.6290905452697189e-08, + "min": -4.3932092999999998e-07, + "std": 2.5878895471009855e-08, + "tendency_mean": 4.9786636299999998e-14, + "tendency_std": 1.31542652e-08 + }, + "fh": { + "max": 4030.0234, + "mean": 60.779296875, + "min": -626.90563999999995, + "std": 99.746055603027344, + "tendency_mean": -0.00073057660699999999, + "tendency_std": 56.201540299999998 + }, + "fw": { + "max": 3.8376616000000002e-06, + "mean": -9.0503062821767344e-09, + "min": -1.23689515e-05, + "std": 1.2910506086427631e-07, + "tendency_mean": -6.7978131299999999e-13, + "tendency_std": 7.9606082200000001e-08 + }, + "m_ice": { + "max": 25.936427999999999, + "mean": 0.32982933521270752, + "min": 0, + "std": 0.79757022857666016, + "tendency_mean": 5.4506417599999998e-06, + "tendency_std": 0.036242821100000003 + }, + "prec": { + "max": 2.525462e-06, + "mean": 2.6593310664679848e-08, + "min": 0, + "std": 5.285275150868074e-08, + "tendency_mean": -3.3853138e-13, + "tendency_std": 4.8233621599999999e-08 + }, + "salt_107": { + "max": 43.014960000000002, + "mean": 27.616165161132812, + "min": 0, + "std": 12.308387756347656, + "tendency_mean": -2.3107379200000001e-08, + "tendency_std": 0.0156208222 + }, + "salt_1110": { + "max": 41.029778, + "mean": 21.883447647094727, + "min": 0, + "std": 16.908939361572266, + "tendency_mean": 3.4613576399999999e-07, + "tendency_std": 0.0010177015200000001 + }, + "salt_125": { + "max": 43.006659999999997, + "mean": 27.254949569702148, + "min": 0, + "std": 12.871762275695801, + "tendency_mean": -8.1194977300000001e-08, + "tendency_std": 0.013865365500000001 + }, + "salt_1255": { + "max": 41.029870000000003, + "mean": 21.570472717285156, + "min": 0, + "std": 17.077251434326172, + "tendency_mean": 
3.6374685499999998e-07, + "tendency_std": 0.00091156354100000001 + }, + "salt_1415": { + "max": 41.029679999999999, + "mean": 21.2353515625, + "min": 0, + "std": 17.285608291625977, + "tendency_mean": 3.8296528199999998e-07, + "tendency_std": 0.00073116197600000003 + }, + "salt_147": { + "max": 43.000317000000003, + "mean": 26.878986358642578, + "min": 0, + "std": 13.35154914855957, + "tendency_mean": -2.3914987000000003e-07, + "tendency_std": 0.0121241274 + }, + "salt_15": { + "max": 43.929763999999999, + "mean": 32.341804504394531, + "min": 4.5865315999999998, + "std": 2.653770923614502, + "tendency_mean": 8.6306730400000007e-06, + "tendency_std": 0.067403445100000001 + }, + "salt_1600": { + "max": 41.013556999999999, + "mean": 20.818880081176758, + "min": 0, + "std": 17.259021759033203, + "tendency_mean": 4.1147636300000002e-07, + "tendency_std": 0.00058586787799999997 + }, + "salt_175": { + "max": 42.992840000000001, + "mean": 26.44842529296875, + "min": 0, + "std": 13.687381744384766, + "tendency_mean": -3.7272405600000001e-07, + "tendency_std": 0.0100463827 + }, + "salt_1810": { + "max": 40.997109999999999, + "mean": 20.404722213745117, + "min": 0, + "std": 17.707401275634766, + "tendency_mean": 3.9207949999999999e-07, + "tendency_std": 0.00042929928999999999 + }, + "salt_2": { + "max": 43.706963000000002, + "mean": 32.257827758789062, + "min": 4.0914599999999997, + "std": 2.835813045501709, + "tendency_mean": 1.08553204e-05, + "tendency_std": 0.075807176800000001 + }, + "salt_2035": { + "max": 40.996749999999999, + "mean": 19.87188720703125, + "min": 0, + "std": 17.852931976318359, + "tendency_mean": 3.4376353500000001e-07, + "tendency_std": 0.000339338152 + }, + "salt_210": { + "max": 42.990400000000001, + "mean": 25.863618850708008, + "min": 0, + "std": 14.264633178710938, + "tendency_mean": -4.5181340399999998e-07, + "tendency_std": 0.0079093619300000008 + }, + "salt_2275": { + "max": 40.997096999999997, + "mean": 19.278139114379883, + "min": 0, + "std": 16.925962448120117, + "tendency_mean": 3.1789927200000001e-07, + "tendency_std": 0.00029417651200000001 + }, + "salt_25": { + "max": 44.028145000000002, + "mean": 32.395294189453125, + "min": 4.7742089999999999, + "std": 2.5984039306640625, + "tendency_mean": 4.5685088900000002e-06, + "tendency_std": 0.055270663400000003 + }, + "salt_2525": { + "max": 40.995289999999997, + "mean": 18.586221694946289, + "min": 0, + "std": 16.881687164306641, + "tendency_mean": 2.5339889199999998e-07, + "tendency_std": 0.00027817722700000002 + }, + "salt_255": { + "max": 42.987380000000002, + "mean": 25.376853942871094, + "min": 0, + "std": 14.656915664672852, + "tendency_mean": -3.8928506900000002e-07, + "tendency_std": 0.0062378300099999997 + }, + "salt_2775": { + "max": 40.985477000000003, + "mean": 17.790977478027344, + "min": 0, + "std": 16.561243057250977, + "tendency_mean": 1.791549e-07, + "tendency_std": 0.00027689102699999998 + }, + "salt_3025": { + "max": 40.984366999999999, + "mean": 16.779687881469727, + "min": 0, + "std": 16.877496719360352, + "tendency_mean": 1.1900090499999999e-07, + "tendency_std": 0.000271223424 + }, + "salt_310": { + "max": 42.981915000000001, + "mean": 24.852371215820312, + "min": 0, + "std": 14.595285415649414, + "tendency_mean": -1.7839067400000001e-07, + "tendency_std": 0.0046981236200000004 + }, + "salt_3275": { + "max": 40.98245, + "mean": 15.639055252075195, + "min": 0, + "std": 17.022853851318359, + "tendency_mean": 8.7188000400000006e-08, + "tendency_std": 0.000234959765 + }, + "salt_35": { + "max": 
43.192055000000003, + "mean": 31.914072036743164, + "min": 0, + "std": 5.0878896713256836, + "tendency_mean": 1.7331518299999999e-06, + "tendency_std": 0.0420113581 + }, + "salt_3525": { + "max": 40.978157000000003, + "mean": 14.172965049743652, + "min": 0, + "std": 18.063684463500977, + "tendency_mean": 5.58900542e-08, + "tendency_std": 0.000186160812 + }, + "salt_375": { + "max": 42.960459999999998, + "mean": 24.371938705444336, + "min": 0, + "std": 14.821865081787109, + "tendency_mean": 3.1452706499999998e-08, + "tendency_std": 0.0034227357500000001 + }, + "salt_3775": { + "max": 35.102127000000003, + "mean": 12.40196418762207, + "min": 0, + "std": 17.293697357177734, + "tendency_mean": 3.9719874299999999e-08, + "tendency_std": 0.00012826267600000001 + }, + "salt_4025": { + "max": 35.095239999999997, + "mean": 10.699524879455566, + "min": 0, + "std": 15.42487907409668, + "tendency_mean": 3.6285227200000003e-08, + "tendency_std": 8.8368719599999995e-05 + }, + "salt_4275": { + "max": 35.079619999999998, + "mean": 9.0423421859741211, + "min": 0, + "std": 15.315444946289062, + "tendency_mean": 3.4724650899999998e-08, + "tendency_std": 7.0072530600000002e-05 + }, + "salt_45": { + "max": 43.144410000000001, + "mean": 31.686046600341797, + "min": 0, + "std": 5.8892602920532227, + "tendency_mean": 7.4184271399999998e-07, + "tendency_std": 0.034669475399999997 + }, + "salt_450": { + "max": 42.959229999999998, + "mean": 23.873828887939453, + "min": 0, + "std": 14.996237754821777, + "tendency_mean": 1.81721265e-07, + "tendency_std": 0.0026696557000000002 + }, + "salt_4525": { + "max": 35.076569999999997, + "mean": 7.0343856811523438, + "min": 0, + "std": 13.349733352661133, + "tendency_mean": 3.0257845600000002e-08, + "tendency_std": 5.1765292599999998e-05 + }, + "salt_4775": { + "max": 35.072400000000002, + "mean": 5.2517213821411133, + "min": 0, + "std": 11.848274230957031, + "tendency_mean": 2.3967172400000001e-08, + "tendency_std": 3.1361576699999997e-05 + }, + "salt_5025": { + "max": 34.952525999999999, + "mean": 3.5900905132293701, + "min": 0, + "std": 10.155359268188477, + "tendency_mean": 1.6490759200000001e-08, + "tendency_std": 1.6732103e-05 + }, + "salt_5275": { + "max": 34.922150000000002, + "mean": 2.0695559978485107, + "min": 0, + "std": 8.0622138977050781, + "tendency_mean": 8.9573021799999994e-09, + "tendency_std": 6.9930372400000002e-06 + }, + "salt_535": { + "max": 42.957520000000002, + "mean": 23.426124572753906, + "min": 0, + "std": 15.547465324401855, + "tendency_mean": 2.8991122000000001e-07, + "tendency_std": 0.0021003833700000002 + }, + "salt_55": { + "max": 43.097000000000001, + "mean": 30.007053375244141, + "min": 0, + "std": 9.266444206237793, + "tendency_mean": 4.1868620900000001e-07, + "tendency_std": 0.028689827399999999 + }, + "salt_5525": { + "max": 34.910617999999999, + "mean": 1.0921233892440796, + "min": 0, + "std": 5.9660658836364746, + "tendency_mean": 3.2388381800000001e-09, + "tendency_std": 2.81070606e-06 + }, + "salt_5825": { + "max": 34.905388000000002, + "mean": 0.33076769113540649, + "min": 0, + "std": 3.3561525344848633, + "tendency_mean": 1.2973012500000001e-11, + "tendency_std": 9.3606709299999998e-07 + }, + "salt_6125": { + "max": 0, + "mean": 0, + "min": 0, + "std": 0, + "tendency_mean": 0, + "tendency_std": 0 + }, + "salt_630": { + "max": 42.950659999999999, + "mean": 23.029767990112305, + "min": 0, + "std": 16.139122009277344, + "tendency_mean": 3.6597383200000002e-07, + "tendency_std": 0.00167487243 + }, + "salt_65": { + "max": 
43.073394999999998, + "mean": 29.360527038574219, + "min": 0, + "std": 10.276013374328613, + "tendency_mean": -1.7860298800000001e-07, + "tendency_std": 0.024763202299999999 + }, + "salt_7": { + "max": 43.808242999999997, + "mean": 32.272899627685547, + "min": 4.2887278000000002, + "std": 2.7967166900634766, + "tendency_mean": 1.6589388299999999e-05, + "tendency_std": 0.077485067500000004 + }, + "salt_735": { + "max": 42.944823999999997, + "mean": 22.700708389282227, + "min": 0, + "std": 16.516014099121094, + "tendency_mean": 3.6644852199999999e-07, + "tendency_std": 0.0014008132 + }, + "salt_75": { + "max": 43.056846999999998, + "mean": 28.87939453125, + "min": 0, + "std": 10.910923004150391, + "tendency_mean": -7.0276202399999996e-07, + "tendency_std": 0.021746464600000001 + }, + "salt_85": { + "max": 43.026769999999999, + "mean": 28.518390655517578, + "min": 0, + "std": 11.300876617431641, + "tendency_mean": -4.2576214500000002e-07, + "tendency_std": 0.019364927300000001 + }, + "salt_850": { + "max": 42.938020000000002, + "mean": 22.419292449951172, + "min": 0, + "std": 16.671541213989258, + "tendency_mean": 3.5693385600000002e-07, + "tendency_std": 0.0011938013399999999 + }, + "salt_95": { + "max": 43.024383999999998, + "mean": 28.064107894897461, + "min": 0, + "std": 11.746476173400879, + "tendency_mean": -8.9861809799999997e-08, + "tendency_std": 0.0176384926 + }, + "salt_975": { + "max": 42.924103000000002, + "mean": 22.155311584472656, + "min": 0, + "std": 16.80963134765625, + "tendency_mean": 3.53216137e-07, + "tendency_std": 0.0010515814599999999 + }, + "snow": { + "max": 7.6605510000000003e-07, + "mean": 5.1672390810608704e-09, + "min": 0, + "std": 1.5309318257550331e-08, + "tendency_mean": -8.8739753600000002e-14, + "tendency_std": 1.46953155e-08 + }, + "ssh": { + "max": 2.5286111999999998, + "mean": -0.32952305674552917, + "min": -3.078033, + "std": 0.73977530002593994, + "tendency_mean": -1.8666155e-06, + "tendency_std": 0.0411327321 + }, + "sss": { + "max": 43.706963000000002, + "mean": 33.480148315429688, + "min": 4.0914599999999997, + "std": 2.581596851348877, + "tendency_mean": 1.08553204e-05, + "tendency_std": 0.075807176800000001 + }, + "sst": { + "max": 36.818249999999999, + "mean": 12.030770301818848, + "min": -2.4110692, + "std": 11.86808967590332, + "tendency_mean": -6.3489477899999997e-05, + "tendency_std": 0.16728558700000001 + }, + "swr": { + "max": 402.56995000000001, + "mean": 133.33403015136719, + "min": 2.2013327999999999e-14, + "std": 93.897171020507812, + "tendency_mean": 0.00093269804299999997, + "tendency_std": 34.501693699999997 + }, + "temp_107": { + "max": 31.318180000000002, + "mean": 8.4127569198608398, + "min": -3.409824, + "std": 8.7992982864379883, + "tendency_mean": 2.8220659599999998e-06, + "tendency_std": 0.12910090099999999 + }, + "temp_1110": { + "max": 15.815804, + "mean": 2.340416431427002, + "min": -1.2505630000000001, + "std": 2.3307058811187744, + "tendency_mean": 5.2456198599999998e-06, + "tendency_std": 0.0167829499 + }, + "temp_125": { + "max": 30.953156, + "mean": 8.0400791168212891, + "min": -2.0373347000000002, + "std": 8.3800849914550781, + "tendency_mean": 5.2039661400000001e-06, + "tendency_std": 0.12512378699999999 + }, + "temp_1255": { + "max": 15.792374000000001, + "mean": 2.0679359436035156, + "min": -1.0943400999999999, + "std": 2.1141262054443359, + "tendency_mean": 3.9994316499999996e-06, + "tendency_std": 0.0138513566 + }, + "temp_1415": { + "max": 15.76568, + "mean": 1.7873553037643433, + "min": -1.1140639999999999, + 
"std": 1.9889605045318604, + "tendency_mean": 3.3964499499999999e-06, + "tendency_std": 0.0110037776 + }, + "temp_147": { + "max": 30.234096999999998, + "mean": 7.5965180397033691, + "min": -2.0434070000000002, + "std": 7.8690128326416016, + "tendency_mean": 7.6428700000000006e-06, + "tendency_std": 0.13012958899999999 + }, + "temp_15": { + "max": 37.635505999999999, + "mean": 12.063199996948242, + "min": -2.3786550000000002, + "std": 11.588679313659668, + "tendency_mean": -3.0737721499999998e-05, + "tendency_std": 0.17792333399999999 + }, + "temp_1600": { + "max": 15.692436000000001, + "mean": 1.5699480772018433, + "min": -1.1531532, + "std": 1.8327451944351196, + "tendency_mean": 3.1720227200000001e-06, + "tendency_std": 0.0083165654599999996 + }, + "temp_175": { + "max": 28.899840999999999, + "mean": 7.0903582572937012, + "min": -2.0487039999999999, + "std": 7.2172760963439941, + "tendency_mean": 7.0438662800000001e-06, + "tendency_std": 0.12420028399999999 + }, + "temp_1810": { + "max": 15.62989, + "mean": 1.3911718130111694, + "min": -1.1840645999999999, + "std": 1.7193930149078369, + "tendency_mean": 3.2556224700000002e-06, + "tendency_std": 0.0062804728199999998 + }, + "temp_2": { + "max": 36.818249999999999, + "mean": 12.473414421081543, + "min": -2.4110692, + "std": 11.79872989654541, + "tendency_mean": -6.3489477899999997e-05, + "tendency_std": 0.16728558700000001 + }, + "temp_2035": { + "max": 15.585190000000001, + "mean": 1.2604461908340454, + "min": -1.2232417, + "std": 1.628853440284729, + "tendency_mean": 3.14789546e-06, + "tendency_std": 0.0055092114100000001 + }, + "temp_210": { + "max": 26.640518, + "mean": 6.5949945449829102, + "min": -2.0514749999999999, + "std": 6.4266276359558105, + "tendency_mean": 7.1547814099999998e-06, + "tendency_std": 0.084193560900000006 + }, + "temp_2275": { + "max": 15.582091999999999, + "mean": 1.1254847049713135, + "min": -1.2424046, + "std": 1.5240623950958252, + "tendency_mean": 2.8301918400000001e-06, + "tendency_std": 0.00496020579 + }, + "temp_25": { + "max": 38.074005, + "mean": 11.513513565063477, + "min": -2.3638077000000002, + "std": 11.331280708312988, + "tendency_mean": 3.05101772e-06, + "tendency_std": 0.18400682700000001 + }, + "temp_2525": { + "max": 15.579272, + "mean": 0.98504936695098877, + "min": -1.2627721999999999, + "std": 1.371037483215332, + "tendency_mean": 2.7038975499999999e-06, + "tendency_std": 0.0044164329000000004 + }, + "temp_255": { + "max": 25.084097, + "mean": 6.1386580467224121, + "min": -2.0583277, + "std": 5.9623332023620605, + "tendency_mean": 7.8116779900000001e-06, + "tendency_std": 0.050373583700000002 + }, + "temp_2775": { + "max": 15.573591, + "mean": 0.85470938682556152, + "min": -1.2739971000000001, + "std": 1.1932723522186279, + "tendency_mean": 2.4052726399999999e-06, + "tendency_std": 0.0039929551299999998 + }, + "temp_3025": { + "max": 15.559989, + "mean": 0.74359047412872314, + "min": -1.2829132000000001, + "std": 1.0702434778213501, + "tendency_mean": 2.1262533199999999e-06, + "tendency_std": 0.00351237552 + }, + "temp_310": { + "max": 23.818480000000001, + "mean": 5.7558016777038574, + "min": -2.0583176999999999, + "std": 5.595914363861084, + "tendency_mean": 8.9519829000000002e-06, + "tendency_std": 0.038345373199999998 + }, + "temp_3275": { + "max": 15.5507145, + "mean": 0.65803676843643188, + "min": -1.2811551000000001, + "std": 0.93001091480255127, + "tendency_mean": 1.9085927900000001e-06, + "tendency_std": 0.0028723571500000001 + }, + "temp_35": { + "max": 35.503450000000001, + 
"mean": 10.672279357910156, + "min": -2.3884690000000002, + "std": 11.030865669250488, + "tendency_mean": 3.3323773800000002e-06, + "tendency_std": 0.18124232100000001 + }, + "temp_3525": { + "max": 15.513709, + "mean": 0.55578887462615967, + "min": -1.0834497999999999, + "std": 0.84032636880874634, + "tendency_mean": 1.7204021000000001e-06, + "tendency_std": 0.0022105630199999999 + }, + "temp_375": { + "max": 22.259909, + "mean": 5.3815507888793945, + "min": -2.0372400000000002, + "std": 5.2755613327026367, + "tendency_mean": 1.03445182e-05, + "tendency_std": 0.033808913099999997 + }, + "temp_3775": { + "max": 9.8797630000000005, + "mean": 0.45927414298057556, + "min": -1.0719993999999999, + "std": 0.71155774593353271, + "tendency_mean": 1.5714706200000001e-06, + "tendency_std": 0.00150663563 + }, + "temp_4025": { + "max": 9.8655039999999996, + "mean": 0.37790939211845398, + "min": -1.0572032, + "std": 0.64861828088760376, + "tendency_mean": 1.4748427599999999e-06, + "tendency_std": 0.00107773257 + }, + "temp_4275": { + "max": 4.81351, + "mean": 0.30526629090309143, + "min": -1.0367694999999999, + "std": 0.5628896951675415, + "tendency_mean": 1.3043749999999999e-06, + "tendency_std": 0.00083536202 + }, + "temp_45": { + "max": 34.042189999999998, + "mean": 10.141136169433594, + "min": -2.6714932999999998, + "std": 10.773600578308105, + "tendency_mean": 1.27289437e-05, + "tendency_std": 0.162556863 + }, + "temp_450": { + "max": 22.151342, + "mean": 4.9407262802124023, + "min": -2.0242230000000001, + "std": 4.8691682815551758, + "tendency_mean": 1.14928987e-05, + "tendency_std": 0.034538136800000001 + }, + "temp_4525": { + "max": 4.7899039999999999, + "mean": 0.22632491588592529, + "min": -0.84896819999999995, + "std": 0.48999306559562683, + "tendency_mean": 1.0543995599999999e-06, + "tendency_std": 0.00058815284099999999 + }, + "temp_4775": { + "max": 4.7582655000000003, + "mean": 0.15983007848262787, + "min": -0.85299899999999995, + "std": 0.41704696416854858, + "tendency_mean": 7.4577455699999995e-07, + "tendency_std": 0.00034746309400000001 + }, + "temp_5025": { + "max": 3.2925393999999999, + "mean": 0.1079963892698288, + "min": -0.8750926, + "std": 0.34336119890213013, + "tendency_mean": 4.9932744299999995e-07, + "tendency_std": 0.00018310449600000001 + }, + "temp_5275": { + "max": 2.7564503999999999, + "mean": 0.063276804983615875, + "min": -0.82152605000000001, + "std": 0.26680672168731689, + "tendency_mean": 2.5302170400000001e-07, + "tendency_std": 7.5373601599999999e-05 + }, + "temp_535": { + "max": 22.128520999999999, + "mean": 4.4501943588256836, + "min": -2.0239381999999999, + "std": 4.3277583122253418, + "tendency_mean": 1.1660899099999999e-05, + "tendency_std": 0.034405366399999998 + }, + "temp_55": { + "max": 33.092384000000003, + "mean": 9.7377958297729492, + "min": -3.010891, + "std": 10.455321311950684, + "tendency_mean": 1.51596129e-05, + "tendency_std": 0.14692122699999999 + }, + "temp_5525": { + "max": 2.2064853000000002, + "mean": 0.034461952745914459, + "min": -0.79653300000000005, + "std": 0.19668422639369965, + "tendency_mean": 1.1383516200000001e-07, + "tendency_std": 2.77308005e-05 + }, + "temp_5825": { + "max": 2.1230167999999998, + "mean": 0.01053240429610014, + "min": -0.23236397, + "std": 0.10968272387981415, + "tendency_mean": 2.70807528e-08, + "tendency_std": 8.0334484099999992e-06 + }, + "temp_6125": { + "max": 0, + "mean": 0, + "min": 0, + "std": 0, + "tendency_mean": 0, + "tendency_std": 0 + }, + "temp_630": { + "max": 21.984722000000001, + "mean": 
4.0054893493652344, + "min": -1.9585052000000001, + "std": 3.9097366333007812, + "tendency_mean": 1.0867853e-05, + "tendency_std": 0.029572656900000001 + }, + "temp_65": { + "max": 32.482480000000002, + "mean": 9.4215126037597656, + "min": -3.2254858, + "std": 10.130072593688965, + "tendency_mean": 8.6758272999999995e-06, + "tendency_std": 0.14097937899999999 + }, + "temp_7": { + "max": 37.26155, + "mean": 12.414287567138672, + "min": -2.3921801999999999, + "std": 11.765914916992188, + "tendency_mean": -6.6942015800000004e-05, + "tendency_std": 0.169091876 + }, + "temp_735": { + "max": 21.842459000000002, + "mean": 3.5275967121124268, + "min": -1.9385995, + "std": 3.4728970527648926, + "tendency_mean": 9.6266858699999994e-06, + "tendency_std": 0.024331344500000001 + }, + "temp_75": { + "max": 31.971512000000001, + "mean": 9.1560678482055664, + "min": -3.1831768, + "std": 9.8309993743896484, + "tendency_mean": 2.9724359100000002e-06, + "tendency_std": 0.13962898700000001 + }, + "temp_85": { + "max": 31.833645000000001, + "mean": 8.9190692901611328, + "min": -2.8613887, + "std": 9.5009126663208008, + "tendency_mean": 1.7367955e-06, + "tendency_std": 0.13873812399999999 + }, + "temp_850": { + "max": 21.802900000000001, + "mean": 3.0890054702758789, + "min": -2.0103811999999999, + "std": 2.9871523380279541, + "tendency_mean": 8.3567868100000004e-06, + "tendency_std": 0.0210946259 + }, + "temp_95": { + "max": 31.468529, + "mean": 8.6898736953735352, + "min": -3.1342865999999998, + "std": 9.1555490493774414, + "tendency_mean": 2.1666489599999999e-06, + "tendency_std": 0.13635844499999999 + }, + "temp_975": { + "max": 21.642899, + "mean": 2.6832332611083984, + "min": -1.3440561, + "std": 2.6568620204925537, + "tendency_mean": 6.8145104400000003e-06, + "tendency_std": 0.019096676600000001 + }, + "w_0": { + "max": 5.6393914999999997e-18, + "mean": -1.6439100753589446e-25, + "min": -5.7431174999999998e-18, + "std": 5.5434458715475878e-20, + "tendency_mean": 2.1577591599999999e-26, + "tendency_std": 7.9159901299999997e-20 + }, + "w_10": { + "max": 0.00095991545999999999, + "mean": 1.198608003960544e-07, + "min": -0.0012854622000000001, + "std": 1.0335589649912436e-05, + "tendency_mean": -2.6858467299999999e-12, + "tendency_std": 1.0066090900000001e-05 + }, + "w_100": { + "max": 0.0049806273999999998, + "mean": -3.5491773076046229e-08, + "min": -0.0070670749999999999, + "std": 3.329905666760169e-05, + "tendency_mean": -2.25520789e-12, + "tendency_std": 3.5330702599999999e-05 + }, + "w_1040": { + "max": 0.0078852330000000002, + "mean": -3.529927994350146e-07, + "min": -0.015573787, + "std": 7.4590810982044786e-05, + "tendency_mean": -3.1247583000000003e-11, + "tendency_std": 7.4773654300000001e-05 + }, + "w_115": { + "max": 0.0041305003999999998, + "mean": -7.1730191564256529e-08, + "min": -0.0092341750000000007, + "std": 3.4794684324879199e-05, + "tendency_mean": -2.1059992600000002e-12, + "tendency_std": 3.7124009100000002e-05 + }, + "w_1180": { + "max": 0.010083091000000001, + "mean": -3.0973208708928723e-07, + "min": -0.021765616000000002, + "std": 7.5395029853098094e-05, + "tendency_mean": -4.1780730100000002e-11, + "tendency_std": 7.8759323200000003e-05 + }, + "w_1330": { + "max": 0.01006845, + "mean": -2.1430788876841689e-07, + "min": -0.022852609999999999, + "std": 7.8899887739680707e-05, + "tendency_mean": -3.2929051999999998e-11, + "tendency_std": 8.3690259099999993e-05 + }, + "w_135": { + "max": 0.0060241623000000001, + "mean": -1.0555431373404645e-07, + "min": -0.010842528000000001, + 
"std": 3.6699773772852495e-05, + "tendency_mean": -8.74748014e-13, + "tendency_std": 3.91282716e-05 + }, + "w_1500": { + "max": 0.011133700999999999, + "mean": -1.4938072467884922e-07, + "min": -0.024544206999999998, + "std": 8.2030019257217646e-05, + "tendency_mean": -2.35121529e-11, + "tendency_std": 8.9950692000000004e-05 + }, + "w_160": { + "max": 0.0051827243999999998, + "mean": -1.3021882239172555e-07, + "min": -0.010714311000000001, + "std": 3.9117629057727754e-05, + "tendency_mean": -1.7083342999999999e-12, + "tendency_std": 4.1455535499999997e-05 + }, + "w_1700": { + "max": 0.012374543, + "mean": -1.0373175030053972e-07, + "min": -0.024955188999999999, + "std": 8.3230734162498266e-05, + "tendency_mean": -2.60648663e-11, + "tendency_std": 9.5172323100000005e-05 + }, + "w_190": { + "max": 0.0040194799999999998, + "mean": -1.7206454572260554e-07, + "min": -0.010387663, + "std": 4.2478022805880755e-05, + "tendency_mean": 1.5100039000000001e-12, + "tendency_std": 4.4475870699999998e-05 + }, + "w_1920": { + "max": 0.013321202000000001, + "mean": -5.7395190822262521e-08, + "min": -0.025170587000000001, + "std": 8.8690765551291406e-05, + "tendency_mean": -3.0290449400000001e-11, + "tendency_std": 9.85544992e-05 + }, + "w_20": { + "max": 0.0014447765, + "mean": 1.8982947835866071e-07, + "min": -0.0024158969000000001, + "std": 1.6178701116587035e-05, + "tendency_mean": -5.2162727199999997e-12, + "tendency_std": 1.5927105599999999e-05 + }, + "w_2150": { + "max": 0.020149535999999999, + "mean": 8.9890434651351825e-08, + "min": -0.022115349999999999, + "std": 9.0620116679929197e-05, + "tendency_mean": -4.21448825e-11, + "tendency_std": 0.00010148885000000001 + }, + "w_230": { + "max": 0.0066316760000000004, + "mean": -2.2599402882406139e-07, + "min": -0.0097712200000000006, + "std": 4.7080098738661036e-05, + "tendency_mean": 5.00886054e-12, + "tendency_std": 4.7713300000000003e-05 + }, + "w_2400": { + "max": 0.019138142, + "mean": 1.1984339209902828e-07, + "min": -0.017900093999999998, + "std": 9.1525573225226253e-05, + "tendency_mean": -3.27247449e-11, + "tendency_std": 0.000100709723 + }, + "w_2650": { + "max": 0.018334665999999999, + "mean": 1.5018591170701256e-07, + "min": -0.01560683, + "std": 8.9789988123811781e-05, + "tendency_mean": -2.7979846999999999e-11, + "tendency_std": 9.90555788e-05 + }, + "w_280": { + "max": 0.0052918569999999996, + "mean": -2.7644495048662066e-07, + "min": -0.014338071000000001, + "std": 5.2101513574598357e-05, + "tendency_mean": 7.1175806000000004e-12, + "tendency_std": 5.03959913e-05 + }, + "w_2900": { + "max": 0.016431596, + "mean": 1.4299101280812465e-07, + "min": -0.016208661999999999, + "std": 9.1297966719139367e-05, + "tendency_mean": -3.21379157e-11, + "tendency_std": 9.7182331299999999e-05 + }, + "w_30": { + "max": 0.0024789121999999999, + "mean": 1.9758162750349584e-07, + "min": -0.0036667136, + "std": 2.1813944840687327e-05, + "tendency_mean": -6.7310343900000003e-12, + "tendency_std": 2.13971675e-05 + }, + "w_3150": { + "max": 0.014075245, + "mean": 1.8419027014715539e-07, + "min": -0.024008771000000002, + "std": 8.7022941443137825e-05, + "tendency_mean": -2.05951544e-11, + "tendency_std": 9.1513410800000004e-05 + }, + "w_340": { + "max": 0.005577434, + "mean": -3.2646133263369848e-07, + "min": -0.013197917, + "std": 5.4288506362354383e-05, + "tendency_mean": 1.1251146000000001e-12, + "tendency_std": 5.2962088300000001e-05 + }, + "w_3400": { + "max": 0.010426879, + "mean": 1.700528144965574e-07, + "min": -0.024850324, + "std": 
8.0861813330557197e-05, + "tendency_mean": -2.2624068200000001e-11, + "tendency_std": 8.2565952199999994e-05 + }, + "w_3650": { + "max": 0.012837757999999999, + "mean": 1.6666346880356286e-07, + "min": -0.024363777, + "std": 7.5373078288976103e-05, + "tendency_mean": -2.5686969599999999e-11, + "tendency_std": 7.3260128099999995e-05 + }, + "w_3900": { + "max": 0.018327428, + "mean": 1.4664085767890356e-07, + "min": -0.023039468, + "std": 6.7887951445300132e-05, + "tendency_mean": -1.6302273699999999e-11, + "tendency_std": 6.3399845299999994e-05 + }, + "w_40": { + "max": 0.0029668189999999999, + "mean": 1.7512732597424474e-07, + "min": -0.0031564683, + "std": 2.4752804165473208e-05, + "tendency_mean": -7.7716925799999997e-12, + "tendency_std": 2.4859617699999999e-05 + }, + "w_410": { + "max": 0.0065250964999999999, + "mean": -3.4550032523839036e-07, + "min": -0.0097920614999999992, + "std": 5.756724567618221e-05, + "tendency_mean": -6.7421164000000003e-12, + "tendency_std": 5.6497973499999998e-05 + }, + "w_4150": { + "max": 0.014237667000000001, + "mean": 1.5999268043742632e-07, + "min": -0.026229221, + "std": 5.901400072616525e-05, + "tendency_mean": 3.3593207900000002e-13, + "tendency_std": 5.3125515099999997e-05 + }, + "w_4400": { + "max": 0.0097249769999999992, + "mean": 1.1862257309758206e-07, + "min": -0.020551291999999999, + "std": 5.052442429587245e-05, + "tendency_mean": 2.7254490900000001e-12, + "tendency_std": 4.1699313500000003e-05 + }, + "w_4650": { + "max": 0.0084841550000000002, + "mean": 7.0326926504549192e-08, + "min": -0.0086573090000000002, + "std": 4.2226140067214146e-05, + "tendency_mean": 6.4570878300000001e-12, + "tendency_std": 3.2409449800000002e-05 + }, + "w_490": { + "max": 0.0073362347000000003, + "mean": -3.1405559752784029e-07, + "min": -0.010704638000000001, + "std": 5.8794019423658028e-05, + "tendency_mean": -5.5138760999999997e-12, + "tendency_std": 5.9011313699999999e-05 + }, + "w_4900": { + "max": 0.015416576499999999, + "mean": 5.7671957875982116e-08, + "min": -0.014008843999999999, + "std": 3.8516820495715365e-05, + "tendency_mean": 3.9813398400000003e-12, + "tendency_std": 2.5258449300000001e-05 + }, + "w_5": { + "max": 0.00054784739999999999, + "mean": 5.6956853455858436e-08, + "min": -0.00065650880000000004, + "std": 5.5881150728964712e-06, + "tendency_mean": -1.90194888e-12, + "tendency_std": 5.43747321e-06 + }, + "w_50": { + "max": 0.0023019276999999999, + "mean": 1.3120295250246272e-07, + "min": -0.0045245512999999996, + "std": 2.7089397917734459e-05, + "tendency_mean": -5.7395177300000003e-12, + "tendency_std": 2.7428455900000001e-05 + }, + "w_5150": { + "max": 0.010517864, + "mean": 3.9003452911856584e-08, + "min": -0.0093100920000000007, + "std": 3.0264105589594692e-05, + "tendency_mean": 9.8583863899999998e-12, + "tendency_std": 1.83393181e-05 + }, + "w_5400": { + "max": 0.0056541487999999997, + "mean": 2.9826960457057794e-08, + "min": -0.0056681149999999996, + "std": 2.3185511963674799e-05, + "tendency_mean": 1.7264388800000001e-12, + "tendency_std": 1.2343407e-05 + }, + "w_5650": { + "max": 0.0048624499999999999, + "mean": 6.2444578396991801e-09, + "min": -0.0041009653000000004, + "std": 1.5937905118335038e-05, + "tendency_mean": -1.02497249e-12, + "tendency_std": 7.0748870599999999e-06 + }, + "w_580": { + "max": 0.0063393520000000004, + "mean": -3.2974125474538596e-07, + "min": -0.012551787, + "std": 6.0415921325329691e-05, + "tendency_mean": -9.0910557400000001e-12, + "tendency_std": 6.1334320699999996e-05 + }, + "w_60": { + "max": 
0.0027334833999999998, + "mean": 9.1661199519421643e-08, + "min": -0.0045517691999999998, + "std": 2.8637374271056615e-05, + "tendency_mean": -8.3534281400000001e-13, + "tendency_std": 2.9423385799999999e-05 + }, + "w_6000": { + "max": 0, + "mean": 0, + "min": 0, + "std": 0, + "tendency_mean": 0, + "tendency_std": 0 + }, + "w_6250": { + "max": 0, + "mean": 0, + "min": 0, + "std": 0, + "tendency_mean": 0, + "tendency_std": 0 + }, + "w_680": { + "max": 0.0081393179999999996, + "mean": -3.3249821740355401e-07, + "min": -0.020960867000000001, + "std": 6.4930827647913247e-05, + "tendency_mean": -1.42778144e-11, + "tendency_std": 6.3661588400000001e-05 + }, + "w_70": { + "max": 0.0028571099999999999, + "mean": 5.6627136757469998e-08, + "min": -0.0052220462999999998, + "std": 2.9920374799985439e-05, + "tendency_mean": -5.1116843999999996e-13, + "tendency_std": 3.1111861999999999e-05 + }, + "w_790": { + "max": 0.0143899005, + "mean": -3.5670544207277999e-07, + "min": -0.018998243000000001, + "std": 6.9700974563602358e-05, + "tendency_mean": -2.44861867e-11, + "tendency_std": 6.68561204e-05 + }, + "w_80": { + "max": 0.0082055700000000006, + "mean": 3.1203018835412877e-08, + "min": -0.0066085670000000001, + "std": 3.103152266703546e-05, + "tendency_mean": 1.1178914399999999e-12, + "tendency_std": 3.2516921100000003e-05 + }, + "w_90": { + "max": 0.0065021393999999998, + "mean": -6.4470055960441641e-09, + "min": -0.0064849290000000004, + "std": 3.215543256374076e-05, + "tendency_mean": -2.7699832100000002e-12, + "tendency_std": 3.3817380600000001e-05 + }, + "w_910": { + "max": 0.0096113569999999992, + "mean": -3.649873292488337e-07, + "min": -0.017038338, + "std": 7.1583162934985012e-05, + "tendency_mean": -3.4605925599999998e-11, + "tendency_std": 7.0253870600000003e-05 + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/imerg.json b/stac/jsons/imerg.json new file mode 100644 index 000000000..997e85c69 --- /dev/null +++ b/stac/jsons/imerg.json @@ -0,0 +1,104 @@ +{ + "assets": { + "nasa-imerg-grib-n320-1998-2024-6h-v1.zarr": { + "description": "Anemoi dataset", + "href": "nasa-imerg-grib-n320-1998-2024-6h-v1.zarr", + "inodes": "38,966", + "locations": [ + "hpc2020", + "European Weather Cloud", + "juwels_booster" + ], + "roles": [ + "data" + ], + "size": "18 GB", + "title": "nasa-imerg-grib-n320-1998-2024-6h-v1.zarr", + "type": "application/vnd+zarr" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.IMERG", + "properties": { + "description": "NASA's Integrated Multi-satellitE Retrievals for GPM (IMERG) product combines information from the GPM satellite constellation to estimate precipitation over the majority of the Earth's surface. 
", + "end_datetime": "2024-07-31T18:00:00", + "fixed_timesteps": "True", + "frequency": "6h", + "keywords": [ + "atmosphere", + "precipitation", + "reanalysis", + "global" + ], + "name": "IMERG", + "processing_level": "NA", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + }, + { + "name": "NASA", + "roles": [ + "provider" + ], + "url": "https://www.nasa.gov" + } + ], + "start_datetime": "1998-01-01T06:00:00", + "title": "IMERG", + "unique_id": "5", + "variables": { + "tp": { + "max": 0.81454499999999996, + "mean": 0.00067628000000000002, + "min": 0, + "std": 0.00326012, + "tendency_mean": -6.5433742700000003e-10, + "tendency_std": 0.0035066099999999998 + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/metopa.json b/stac/jsons/metopa.json new file mode 100644 index 000000000..c1a937e18 --- /dev/null +++ b/stac/jsons/metopa.json @@ -0,0 +1,237 @@ +{ + "assets": { + "MICROWAVE_FCDR_V1.1-20200512/METOPA/*/*.nc": { + "description": "Observation dataset", + "href": "MICROWAVE_FCDR_V1.1-20200512/METOPA/*/*.nc", + "inodes": "64637", + "locations": [ + "hpc2020" + ], + "roles": [ + "data" + ], + "size": "1.3 TB", + "title": "MICROWAVE_FCDR_V1.1-20200512/METOPA/*/*.nc", + "type": "application/vnd+netcdf" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.Metop-A, MHS", + "properties": { + "description": "The MHS Metop-A dataset is derived from the Microwave Humidity Sounder instrument onboard the Meteorological Operational A satellite.", + "end_datetime": "2018-12-31T23:46:05", + "fixed_timesteps": "False", + "frequency": "NA", + "keywords": [ + "atmosphere", + "observation", + "polar-orbiter", + "satellite" + ], + "name": "Metop-A, MHS", + "processing_level": "1C", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + }, + { + "name": "EUMETSAT", + "roles": [ + "provider" + ], + "url": "https://eumetsat.int" + }, + { + "name": "EUMETSAT", + "roles": [ + "processor" + ], + "url": "https://eumetsat.int" + } + ], + "start_datetime": "2006-10-31T21:24:14", + "title": "Metop-A, MHS", + "unique_id": "8", + "variables": { + "btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "coldnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "data_quality_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "instrtemp": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_issue_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_scanline_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + 
"tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "scnlin": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_common_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_independent_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_structured_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "warmnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/metopb.json b/stac/jsons/metopb.json new file mode 100644 index 000000000..0689f2c8a --- /dev/null +++ b/stac/jsons/metopb.json @@ -0,0 +1,237 @@ +{ + "assets": { + "MICROWAVE_FCDR_V1.1-20200512/METOPB/*/*.nc": { + "description": "Observation dataset", + "href": "MICROWAVE_FCDR_V1.1-20200512/METOPB/*/*.nc", + "inodes": "31708", + "locations": [ + "hpc2020" + ], + "roles": [ + "data" + ], + "size": "634.1 GB", + "title": "MICROWAVE_FCDR_V1.1-20200512/METOPB/*/*.nc", + "type": "application/vnd+netcdf" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.Metop-B, MHS", + "properties": { + "description": "The MHS Metop-B dataset is derived from the Microwave Humidity Sounder instrument onboard the Meteorological Operational B satellite.", + "end_datetime": "2018-12-31T23:11:48", + "fixed_timesteps": "False", + "frequency": "NA", + "keywords": [ + "atmosphere", + "observation", + "polar-orbiter", + "satellite" + ], + "name": "Metop-B, MHS", + "processing_level": "1C", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + }, + { + "name": "EUMETSAT", + "roles": [ + "provider" + ], + "url": "https://eumetsat.int" + }, + { + "name": "EUMETSAT", + "roles": [ + "processor" + ], + "url": "https://eumetsat.int" + } + ], + "start_datetime": "2013-04-01T02:06:10", + "title": "Metop-B, MHS", + "unique_id": "9", + "variables": { + "btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "coldnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + 
"tendency_mean": "NA", + "tendency_std": "NA" + }, + "data_quality_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "instrtemp": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_issue_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_scanline_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "scnlin": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_common_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_independent_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_structured_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "warmnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/npp-atms.json b/stac/jsons/npp-atms.json new file mode 100644 index 000000000..5b61f7ce1 --- /dev/null +++ b/stac/jsons/npp-atms.json @@ -0,0 +1,238 @@ +{ + "assets": { + "MICROWAVE_FCDR_V1.1-20200512/SNPP/*/*.nc": { + "description": "Observation dataset", + "href": "MICROWAVE_FCDR_V1.1-20200512/SNPP/*/*.nc", + "inodes": "44469", + "locations": [ + "hpc2020", + "juwels_booster" + ], + "roles": [ + "data" + ], + "size": "2.9 TB", + "title": "MICROWAVE_FCDR_V1.1-20200512/SNPP/*/*.nc", + "type": "application/vnd+netcdf" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.NPP-ATMS", + "properties": { + "description": "The NPP-ATMS (Advanced Technology Microwave Sounder) dataset is derived from the ATMS instrument onboard the NOAA/NASA National Polar-orbiting Partnership (NPP) satellite. 
It provides global measurements of atmospheric temperature, moisture, and pressure profiles, crucial for weather forecasting and climate monitoring", + "end_datetime": "2018-12-31T23:58:08", + "fixed_timesteps": "False", + "frequency": "NA", + "keywords": [ + "atmosphere", + "observation", + "polar-orbiter", + "satellite" + ], + "name": "NPP-ATMS", + "processing_level": "1C", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + }, + { + "name": "NASA", + "roles": [ + "provider" + ], + "url": "https://www.nasa.gov" + }, + { + "name": "EUMETSAT", + "roles": [ + "processor" + ], + "url": "https://eumetsat.int" + } + ], + "start_datetime": "2011-12-11T00:36:13", + "title": "NPP-ATMS", + "unique_id": "6", + "variables": { + "btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "coldnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "data_quality_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "instrtemp": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_issue_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_pixel_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality_scanline_bitmask": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "satellite_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "scnlin": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_azimuth_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_zenith_angle": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "time": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_common_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_independent_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "u_structured_btemps": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "warmnedt": { + "max": "NA", + "mean": "NA", + "min": "NA", + "std": "NA", + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/opera.json b/stac/jsons/opera.json new file mode 100644 index 000000000..f0c90f1e7 --- /dev/null +++ 
b/stac/jsons/opera.json @@ -0,0 +1,113 @@ +{ + "assets": { + "rodeo-opera-files-2km-2013-2023-15m-v1-lambert-azimuthal-equal-area.zarr": { + "description": "Anemoi dataset", + "href": "rodeo-opera-files-2km-2013-2023-15m-v1-lambert-azimuthal-equal-area.zarr", + "inodes": "380,987", + "locations": [ + "hpc2020", + "juwels_booster", + "marenostrum5" + ], + "roles": [ + "data" + ], + "size": "959 GB", + "title": "rodeo-opera-files-2km-2013-2023-15m-v1-lambert-azimuthal-equal-area.zarr", + "type": "application/vnd+zarr" + } + }, + "bbox": [ + -39.5, + 31.800000000000001, + 57.700000000000003, + 73.900000000000006 + ], + "geometry": { + "coordinates": [ + [ + [ + -39.5, + 31.800000000000001 + ], + [ + -39.5, + 73.900000000000006 + ], + [ + 57.700000000000003, + 73.900000000000006 + ], + [ + 57.700000000000003, + 31.800000000000001 + ], + [ + -39.5, + 31.800000000000001 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.OPERA", + "properties": { + "description": "The OPERA radar dataset is produced by the EUMETNET OPERA program, which coordinates and harmonizes European weather radar observations. It provides quality-controlled, pan-European radar composites and individual radar data from national meteorological services. ", + "end_datetime": "2024-02-15T14:05:00", + "fixed_timesteps": "True", + "frequency": "15m", + "keywords": [ + "radar", + "precipitation", + "atmosphere", + "observations" + ], + "name": "OPERA", + "processing_level": "NA", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + } + ], + "start_datetime": "2013-01-22T15:05:00", + "title": "OPERA", + "unique_id": "3", + "variables": { + "mask": { + "max": 3, + "mean": 1.24214, + "min": 0, + "std": 0.64675499999999997, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "quality": { + "max": 24.600000000000001, + "mean": 233054, + "min": 0, + "std": 0.19542599999999999, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "tp": { + "max": 10995900000000000000, + "mean": 2996100000000, + "min": 0, + "std": 2780720000000000, + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/seviri.json b/stac/jsons/seviri.json new file mode 100644 index 000000000..88b2d96b1 --- /dev/null +++ b/stac/jsons/seviri.json @@ -0,0 +1,279 @@ +{ + "assets": { + "observations-od-ai-0001-2018-2023-meteosat-11-seviri-v1.zarr": { + "description": "Observation dataset", + "href": "observations-od-ai-0001-2018-2023-meteosat-11-seviri-v1.zarr", + "inodes": "2727", + "locations": [ + "hpc2020", + "leonardo" + ], + "roles": [ + "data" + ], + "size": "106 GB", + "title": "observations-od-ai-0001-2018-2023-meteosat-11-seviri-v1.zarr", + "type": "application/vnd+zarr" + } + }, + "bbox": [ + -67.471359252929688, + -66.332542419433594, + 67.346687316894531, + 66.451148986816406 + ], + "geometry": { + "coordinates": [ + [ + [ + -67.471359252929688, + -66.332542419433594 + ], + [ + -67.471359252929688, + 66.451148986816406 + ], + [ + 67.346687316894531, + 66.451148986816406 + ], + [ + 67.346687316894531, + -66.332542419433594 + ], + [ + -67.471359252929688, + -66.332542419433594 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.SEVIRI", + "properties": { + 
"description": "The Spinning Enhanced Visible and InfraRed Imager (SEVIRI) is an onboard sensor of the Meteosat Second Generation (MSG) satellites operated by EUMETSAT. SEVIRI provides high-frequency geostationary observations of the Earth’s atmosphere, land, and ocean surfaces over Europe, Africa, and parts of the Atlantic. ", + "end_datetime": "2023-03-21T07:45:00", + "fixed_timesteps": "True", + "frequency": "1h", + "keywords": [ + "atmosphere", + "observation", + "geostationary", + "satellite" + ], + "name": "SEVIRI", + "processing_level": "1C", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + }, + { + "name": "EUMETSAT", + "roles": [ + "provider" + ], + "url": "https://eumetsat.int" + } + ], + "start_datetime": "2018-02-12T21:45:00", + "title": "SEVIRI", + "unique_id": "4", + "variables": { + "cos_julian_day": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "cos_latitude": { + "max": 0.99999368190765381, + "mean": 0, + "min": 0.39953082799911499, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "cos_local_time": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "cos_longitude": { + "max": 0.99999380111694336, + "mean": 0, + "min": 0.38314518332481384, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "cos_sza": { + "max": 1, + "mean": 0, + "min": 0, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "cos_vza": { + "max": 0.99999195337295532, + "mean": 0, + "min": 0.21234826743602753, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "healpix_idx_8": { + "max": 767, + "mean": 344.75527954101562, + "min": 0, + "std": 214.89877319335938, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "lat": { + "max": 66.451148986816406, + "mean": 0, + "min": -66.332542419433594, + "std": 90, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "lon": { + "max": 67.346687316894531, + "mean": 0, + "min": -67.471359252929688, + "std": 180, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_rawbt_10 (IR12.0)": { + "max": 335.60000610351562, + "mean": 277.3193359375, + "min": 80.900001525878906, + "std": 18.946146011352539, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_rawbt_11 (IR13.4)": { + "max": 291.89999389648438, + "mean": 257.73025512695312, + "min": 80.199996948242188, + "std": 13.088528633117676, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_rawbt_4 (IR3.9)": { + "max": 335.70001220703125, + "mean": 282.11981201171875, + "min": 80, + "std": 16.335136413574219, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_rawbt_5 (WV6.2)": { + "max": 263.29998779296875, + "mean": 237.96363830566406, + "min": 80.199996948242188, + "std": 8.5691623687744141, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_rawbt_6 (WV7.3)": { + "max": 287.70001220703125, + "mean": 254.19889831542969, + "min": 80, + "std": 11.519951820373535, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_rawbt_7 (IR8.7)": { + "max": 330.79998779296875, + "mean": 277.3443603515625, + "min": 80.699996948242188, + "std": 17.723258972167969, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_rawbt_8 (IR9.7)": { + "max": 301.29998779296875, + "mean": 257.89312744140625, + "min": 80.099998474121094, + "std": 13.657046318054199, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_rawbt_9 
(IR10.8)": { + "max": 335.60000610351562, + "mean": 278.94522094726562, + "min": 80, + "std": 18.875223159790039, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "sin_julian_day": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "sin_latitude": { + "max": 0.91671973466873169, + "mean": 0, + "min": -0.91589075326919556, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "sin_local_time": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "sin_longitude": { + "max": 0.92285221815109253, + "mean": 0, + "min": -0.92368811368942261, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "solar_zenith": { + "max": 179.80000305175781, + "mean": 0, + "min": 0.20000000298023224, + "std": 180, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "zenith": { + "max": 77.739997863769531, + "mean": 0, + "min": 0.23000000417232513, + "std": 90, + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + "https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/jsons/synop.json b/stac/jsons/synop.json new file mode 100644 index 000000000..9088343b0 --- /dev/null +++ b/stac/jsons/synop.json @@ -0,0 +1,255 @@ +{ + "assets": { + "observations-ea-ofb-0001-1979-2023-combined-surface-v2": { + "description": "Observation dataset", + "href": "observations-ea-ofb-0001-1979-2023-combined-surface-v2", + "inodes": "4711", + "locations": [ + "hpc2020", + "lumi" + ], + "roles": [ + "data" + ], + "size": "61.5 GB", + "title": "observations-ea-ofb-0001-1979-2023-combined-surface-v2", + "type": "application/vnd+zarr" + } + }, + "bbox": [ + -180, + -90, + 180, + 90 + ], + "geometry": { + "coordinates": [ + [ + [ + -180, + -90 + ], + [ + -180, + 90 + ], + [ + 180, + 90 + ], + [ + 180, + -90 + ], + [ + -180, + -90 + ] + ] + ], + "type": "Polygon" + }, + "id": "weathergen.atmo.SYNOP", + "properties": { + "description": "SYNOP (surface synoptic observation) data consist of standardized meteorological observations collected from land-based weather stations worldwide, typically at 6-hourly or hourly intervals. These observations include key atmospheric variables such as temperature, wind speed and direction, pressure, humidity, cloud cover, and precipitation. 
", + "end_datetime": "2023-05-31T21:00:0", + "fixed_timesteps": "True", + "frequency": "3h", + "keywords": [ + "atmosphere", + "observation", + "synoptic data" + ], + "name": "SYNOP", + "processing_level": "NA", + "providers": [ + { + "name": "ECMWF", + "roles": [ + "host" + ], + "url": "https://ecmwf.int" + } + ], + "start_datetime": "1979-01-01T00:00:00", + "title": "SYNOP", + "unique_id": "7", + "variables": { + "cos_julian_day": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "cos_latitude": { + "max": 1, + "mean": 0, + "min": -4.3711388286737929e-08, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "cos_local_time": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "cos_longitude": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "cos_sza": { + "max": 1, + "mean": 0, + "min": 0, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "healpix_idx_8": { + "max": 767, + "mean": 211.55131530761719, + "min": 0, + "std": 208.08619689941406, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "lat": { + "max": 90, + "mean": 0, + "min": -90, + "std": 90, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "lon": { + "max": 180, + "mean": 0, + "min": -180, + "std": 180, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "lsm": { + "max": 1, + "mean": 0.70989722013473511, + "min": 0, + "std": 0.37073072791099548, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_ps_0": { + "max": 113770, + "mean": 97822.171875, + "min": 15990, + "std": 5907.458984375, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_rh2m_0": { + "max": 1, + "mean": 0.71966838836669922, + "min": 1.1888814687225063e-14, + "std": 0.20914055407047272, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_t2m_0": { + "max": 338, + "mean": 285.33074951171875, + "min": 184.30000305175781, + "std": 13.912480354309082, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_tsts_0": { + "max": 320.20001220703125, + "mean": 291.65081787109375, + "min": 229.55000305175781, + "std": 8.8829250335693359, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_u10m_0": { + "max": 80, + "mean": 0.20722207427024841, + "min": -55.149234771728516, + "std": 3.4235959053039551, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "obsvalue_v10m_0": { + "max": 51.645042419433594, + "mean": 0.055502079427242279, + "min": -51.210002899169922, + "std": 3.2893860340118408, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "seqno": { + "max": 22173636, + "mean": 5340427.5, + "min": 5704, + "std": 4946647.5, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "sin_julian_day": { + "max": 0.99999940395355225, + "mean": 0, + "min": -0.99999940395355225, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "sin_latitude": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "sin_local_time": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "sin_longitude": { + "max": 1, + "mean": 0, + "min": -1, + "std": 1, + "tendency_mean": "NA", + "tendency_std": "NA" + }, + "stalt": { + "max": 31072, + "mean": 307.25546264648438, + "min": -389, + "std": 532.384521484375, + "tendency_mean": "NA", + "tendency_std": "NA" + } + } + }, + "stac_extensions": [ + 
"https://stac-extensions.github.io/datacube/v2.2.0/schema.json", + "https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json", + "https://stac-extensions.github.io/xarray-assets/v1.0.0/schema.json" + ], + "stac_version": "1.0.0", + "type": "Feature" +} diff --git a/stac/merged.jsonnet b/stac/merged.jsonnet new file mode 100644 index 000000000..f3009aa35 --- /dev/null +++ b/stac/merged.jsonnet @@ -0,0 +1,72 @@ +// run it with: jsonnet -m ./jsons merged.jsonnet --ext-str branch_name=develop + +// import functions +local fn = import 'functions.libsonnet'; +local branch_name = std.extVar("branch_name"); +local github_username = std.extVar("username"); + +// URL for hrefs +local href_link = 'https://raw.githubusercontent.com/'+github_username+'/WeatherGenerator/refs/heads/'+branch_name+'/stac/jsons/'; + +// TODO: improve this +local era5v8 = import 'era5_v8.jsonnet'; +local opera = import 'opera.jsonnet'; +local cerra = import 'cerra.jsonnet'; +local seviri = import 'seviri.jsonnet'; +local imerg = import 'imerg.jsonnet'; +local nppatms = import 'nppatms.jsonnet'; +local synop = import 'synop.jsonnet'; +local metopa = import 'metopa.jsonnet'; +local metopb = import 'metopb.jsonnet'; +local fy3a = import 'fy3a.jsonnet'; +local fy3b = import 'fy3b.jsonnet'; +local fy3c = import 'fy3c.jsonnet'; +local goes16 = import 'abi-goes16.jsonnet'; +local ifs_fesom_atmos = import 'ifs_fesom_atmos.jsonnet'; +local ifs_fesom_ocean_elem = import 'ifs_fesom_ocean_elem.jsonnet'; +local ifs_fesom_ocean_node = import 'ifs_fesom_ocean_node.jsonnet'; + +local datasets = [era5v8, opera, cerra, seviri, imerg, nppatms, synop, metopa, metopb, fy3a, fy3b, fy3c, goes16, ifs_fesom_atmos, ifs_fesom_ocean_elem, ifs_fesom_ocean_node]; + +local check = fn.check_unique_ids(datasets); + +local files = [ds.filename + '.json' for ds in datasets]; +fn.check_unique_ids(datasets) ++ +{ + 'catalogue.json': + { + type: 'Catalog', + id: 'weathergen', + stac_version: '1.0.0', + description: 'The data catalogue of the WeatherGenerator project', + + links: + [ + { + rel: 'root', + href: href_link + 'catalogue.json', + type: 'application/json', + title: 'The WeatherGenerator data server', + }, + { + rel: 'self', + href: href_link + 'catalogue.json', + type: 'application/json', + }, + ] + + + [fn.dataset_entry_catalogue(ds, href_link) for ds in datasets], + + stac_extensions: [ + 'https://stac-extensions.github.io/datacube/v2.2.0/schema.json', + 'https://stac-extensions.github.io/alternate-assets/v1.2.0/schema.json', + ], + title: 'The WeatherGenerator data catalogue', + }, +} + +{ + [ds.filename]: fn.dataset_entry_fill(ds) + for ds in datasets +} diff --git a/stac/metopa.jsonnet b/stac/metopa.jsonnet new file mode 100644 index 000000000..8064b4543 --- /dev/null +++ b/stac/metopa.jsonnet @@ -0,0 +1,61 @@ +local common = import 'common.jsonnet'; + +{ + name: 'Metop-A, MHS', + filename: 'metopa.json', + description: 'The MHS Metop-A dataset is derived from the Microwave Humidity Sounder instrument onboard the Meteorological Operational A satellite.', + title: 'Metop-A, MHS', + unique_id: '8', + start_datetime: '2006-10-31T21:24:14', + end_datetime: '2018-12-31T23:46:05', + frequency: 'NA', + fixed_timesteps: 'False', + keywords: [ + 'atmosphere', + 'observation', + 'polar-orbiter', + 'satellite', + ], + providers: [ + common.providers.ecmwf_host, + common.providers.eumetsat, + common.providers.eumetsat_processor, + ], + processing_level: '1C', + + + variables: { + names: [ + 'quality_pixel_bitmask', + 'btemps', + 'instrtemp', + 
'scnlin', + 'satellite_azimuth_angle', + 'satellite_zenith_angle', + 'solar_azimuth_angle', + 'solar_zenith_angle', + 'u_independent_btemps', + 'u_structured_btemps', + 'quality_issue_pixel_bitmask', + 'data_quality_bitmask', + 'quality_scanline_bitmask', + 'u_common_btemps', + 'warmnedt', + 'coldnedt', + 'time', + ], + + }, + + geometry: [-180, 180, -90, 90], + + dataset: { + dataset_name: 'MICROWAVE_FCDR_V1.1-20200512/METOPA/*/*.nc', + type: 'application/vnd+netcdf', + description: 'Observation dataset', + locations: [common.hpc.hpc2020], + size: '1.3 TB', + inodes: '64637', + roles: ['data'], + }, +} diff --git a/stac/metopb.jsonnet b/stac/metopb.jsonnet new file mode 100644 index 000000000..691336241 --- /dev/null +++ b/stac/metopb.jsonnet @@ -0,0 +1,61 @@ +local common = import 'common.jsonnet'; + +{ + name: 'Metop-B, MHS', + filename: 'metopb.json', + description: 'The MHS Metop-B dataset is derived from the Microwave Humidity Sounder instrument onboard the Meteorological Operational B satellite.', + title: 'Metop-B, MHS', + unique_id: '9', + start_datetime: '2013-04-01T02:06:10', + end_datetime: '2018-12-31T23:11:48', + frequency: 'NA', + fixed_timesteps: 'False', + keywords: [ + 'atmosphere', + 'observation', + 'polar-orbiter', + 'satellite', + ], + providers: [ + common.providers.ecmwf_host, + common.providers.eumetsat, + common.providers.eumetsat_processor, + ], + processing_level: '1C', + + + variables: { + names: [ + 'quality_pixel_bitmask', + 'btemps', + 'instrtemp', + 'scnlin', + 'satellite_azimuth_angle', + 'satellite_zenith_angle', + 'solar_azimuth_angle', + 'solar_zenith_angle', + 'u_independent_btemps', + 'u_structured_btemps', + 'quality_issue_pixel_bitmask', + 'data_quality_bitmask', + 'quality_scanline_bitmask', + 'u_common_btemps', + 'warmnedt', + 'coldnedt', + 'time', + ], + + }, + + geometry: [-180, 180, -90, 90], + + dataset: { + dataset_name: 'MICROWAVE_FCDR_V1.1-20200512/METOPB/*/*.nc', + type: 'application/vnd+netcdf', + description: 'Observation dataset', + locations: [common.hpc.hpc2020], + size: '634.1 GB', + inodes: '31708', + roles: ['data'], + }, +} diff --git a/stac/nppatms.jsonnet b/stac/nppatms.jsonnet new file mode 100644 index 000000000..bb2a8e02c --- /dev/null +++ b/stac/nppatms.jsonnet @@ -0,0 +1,60 @@ +local common = import 'common.jsonnet'; + +{ + name: 'NPP-ATMS', + filename: 'npp-atms.json', + description: 'The NPP-ATMS (Advanced Technology Microwave Sounder) dataset is derived from the ATMS instrument onboard the NOAA/NASA National Polar-orbiting Partnership (NPP) satellite. 
It provides global measurements of atmospheric temperature, moisture, and pressure profiles, crucial for weather forecasting and climate monitoring', + title: 'NPP-ATMS', + unique_id: '6', + start_datetime: '2011-12-11T00:36:13', + end_datetime: '2018-12-31T23:58:08', + frequency: 'NA', + fixed_timesteps: 'False', + keywords: [ + 'atmosphere', + 'observation', + 'polar-orbiter', + 'satellite', + ], + providers: [ + common.providers.ecmwf_host, + common.providers.nasa, + common.providers.eumetsat_processor + ], + processing_level: '1C', + + variables: { + names: [ + 'quality_pixel_bitmask', + 'instrtemp', + 'scnlin', + 'satellite_azimuth_angle', + 'satellite_zenith_angle', + 'solar_azimuth_angle', + 'solar_zenith_angle', + 'data_quality_bitmask', + 'quality_scanline_bitmask', + 'time', + 'warmnedt', + 'coldnedt', + 'btemps', + 'u_independent_btemps', + 'u_structured_btemps', + 'u_common_btemps', + 'quality_issue_pixel_bitmask', + ], + + }, + + geometry: [-180, 180, -90, 90], + + dataset: { + dataset_name: 'MICROWAVE_FCDR_V1.1-20200512/SNPP/*/*.nc', + type: 'application/vnd+netcdf', + description: 'Observation dataset', + locations: [common.hpc.hpc2020, common.hpc.jsc], + size: '2.9 TB', + inodes: '44469', + roles: ['data'], + }, +} diff --git a/stac/opera.jsonnet b/stac/opera.jsonnet new file mode 100644 index 000000000..e9db5a59b --- /dev/null +++ b/stac/opera.jsonnet @@ -0,0 +1,44 @@ +local common = import 'common.jsonnet'; + +{ + name: 'OPERA', + filename: 'opera.json', + description: 'The OPERA radar dataset is produced by the EUMETNET OPERA program, which coordinates and harmonizes European weather radar observations. It provides quality-controlled, pan-European radar composites and individual radar data from national meteorological services. ', + title: 'OPERA', + unique_id: '3', + start_datetime: '2013-01-22T15:05:00', + end_datetime: '2024-02-15T14:05:00', + frequency: '15m', + fixed_timesteps: 'True', + keywords: [ + 'radar', + 'precipitation', + 'atmosphere', + 'observations', + ], + providers: [ + common.providers.ecmwf_host, + ], + processing_level: 'NA', + + + variables: { + names: ['mask', 'quality', 'tp'], + mins: [0, 0, 0], + maxs: [3, 24.6, 1.09959e+19], + means: [1.24214, 233054, 2.9961e+12], + stds: [0.646755, 0.195426, 2.78072e+15], + }, + + geometry: [-39.5, 57.7, 31.8, 73.9], + + dataset: { + dataset_name: 'rodeo-opera-files-2km-2013-2023-15m-v1-lambert-azimuthal-equal-area.zarr', + type: 'application/vnd+zarr', + description: 'Anemoi dataset', + locations: [common.hpc.hpc2020, common.hpc.jsc, common.hpc.marenostrum5], + size: '959 GB', + inodes: '380,987', + roles: ['data'], + }, +} diff --git a/stac/seviri.jsonnet b/stac/seviri.jsonnet new file mode 100644 index 000000000..a9b0b682c --- /dev/null +++ b/stac/seviri.jsonnet @@ -0,0 +1,47 @@ +local common = import 'common.jsonnet'; + +{ + name: 'SEVIRI', + filename: 'seviri.json', + description: 'The Spinning Enhanced Visible and InfraRed Imager (SEVIRI) is an onboard sensor of the Meteosat Second Generation (MSG) satellites operated by EUMETSAT. SEVIRI provides high-frequency geostationary observations of the Earth’s atmosphere, land, and ocean surfaces over Europe, Africa, and parts of the Atlantic. 
', + title: 'SEVIRI', + unique_id: '4', + start_datetime: '2018-02-12T21:45:00', + end_datetime: '2023-03-21T07:45:00', + frequency: '1h', + fixed_timesteps: 'True', + keywords: [ + 'atmosphere', + 'observation', + 'geostationary', + 'satellite', + ], + providers: [ + common.providers.ecmwf_host, + common.providers.eumetsat, + ], + processing_level: '1C', + + // Retrieved with: root.data.attrs["colnames"], + // root.data.attrs["mins"], root.data.attrs["maxs"], root.data.attrs["means"], + // root.data.attrs["stds"] + variables: { + names: ['healpix_idx_8', 'lat', 'lon', 'zenith', 'solar_zenith', 'obsvalue_rawbt_4 (IR3.9)', 'obsvalue_rawbt_5 (WV6.2)', 'obsvalue_rawbt_6 (WV7.3)', 'obsvalue_rawbt_7 (IR8.7)', 'obsvalue_rawbt_8 (IR9.7)', 'obsvalue_rawbt_9 (IR10.8)', 'obsvalue_rawbt_10 (IR12.0)', 'obsvalue_rawbt_11 (IR13.4)', 'cos_julian_day', 'sin_julian_day', 'cos_local_time', 'sin_local_time', 'cos_sza', 'cos_latitude', 'sin_latitude', 'cos_longitude', 'sin_longitude', 'cos_vza'], + mins: [0.0, -66.3325424194336, -67.47135925292969, 0.23000000417232513, 0.20000000298023224, 80.0, 80.19999694824219, 80.0, 80.69999694824219, 80.0999984741211, 80.0, 80.9000015258789, 80.19999694824219, -1.0, -1.0, -1.0, -1.0, 0.0, 0.399530827999115, -0.9158907532691956, 0.38314518332481384, -0.9236881136894226, 0.21234826743602753], + maxs: [767.0, 66.4511489868164, 67.34668731689453, 77.73999786376953, 179.8000030517578, 335.70001220703125, 263.29998779296875, 287.70001220703125, 330.79998779296875, 301.29998779296875, 335.6000061035156, 335.6000061035156, 291.8999938964844, 1.0, 1.0, 1.0, 1.0, 1.0, 0.9999936819076538, 0.9167197346687317, 0.9999938011169434, 0.9228522181510925, 0.9999919533729553], + means: [344.7552795410156, 0.0, 0.0, 0.0, 0.0, 282.11981201171875, 237.96363830566406, 254.1988983154297, 277.3443603515625, 257.89312744140625, 278.9452209472656, 277.3193359375, 257.7302551269531, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + stds: [214.89877319335938, 90.0, 180.0, 90.0, 180.0, 16.33513641357422, 8.569162368774414, 11.519951820373535, 17.72325897216797, 13.6570463180542, 18.87522315979004, 18.94614601135254, 13.088528633117676, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + }, + + geometry: [-67.47135925292969, 67.34668731689453, -66.3325424194336, 66.4511489868164], + + dataset: { + dataset_name: 'observations-od-ai-0001-2018-2023-meteosat-11-seviri-v1.zarr', + type: 'application/vnd+zarr', + description: 'Observation dataset', + locations: [common.hpc.hpc2020, common.hpc.leonardo], + size: '106 GB', + inodes: '2727', + roles: ['data'], + }, +} diff --git a/stac/synop.jsonnet b/stac/synop.jsonnet new file mode 100644 index 000000000..9f559a364 --- /dev/null +++ b/stac/synop.jsonnet @@ -0,0 +1,47 @@ +local common = import 'common.jsonnet'; + +{ + name: 'SYNOP', + filename: 'synop.json', + description: 'SYNOP (surface synoptic observation) data consist of standardized meteorological observations collected from land-based weather stations worldwide, typically at 6-hourly or hourly intervals. These observations include key atmospheric variables such as temperature, wind speed and direction, pressure, humidity, cloud cover, and precipitation. 
', + title: 'SYNOP', + unique_id: '7', + start_datetime: '1979-01-01T00:00:00', + end_datetime: '2023-05-31T21:00:0', + frequency: '3h', + fixed_timesteps: 'True', + keywords: [ + 'atmosphere', + 'observation', + 'synoptic data', + ], + providers: [ + common.providers.ecmwf_host, + ], + processing_level: 'NA', + + // Retrieved with: root.data.attrs["colnames"], + // root.data.attrs["mins"], root.data.attrs["maxs"], + // root.data.attrs["means"], root.data.attrs["stds"] + variables: { + + names: ['healpix_idx_8', 'seqno', 'lat', 'lon', 'stalt', 'lsm', 'obsvalue_tsts_0', 'obsvalue_t2m_0', 'obsvalue_u10m_0', 'obsvalue_v10m_0', 'obsvalue_rh2m_0', 'obsvalue_ps_0', 'cos_julian_day', 'sin_julian_day', 'cos_local_time', 'sin_local_time', 'cos_sza', 'cos_latitude', 'sin_latitude', 'cos_longitude', 'sin_longitude'], + mins: [0.0, 5704.0, -90.0, -180.0, -389.0, 0.0, 229.5500030517578, 184.3000030517578, -55.149234771728516, -51.21000289916992, 1.1888814687225063e-14, 15990.0, -1.0, -0.9999994039535522, -1.0, -1.0, 0.0, -4.371138828673793e-08, -1.0, -1.0, -1.0], + maxs: [767.0, 22173636.0, 90.0, 180.0, 31072.0, 1.0, 320.20001220703125, 338.0, 80.0, 51.645042419433594, 1.0, 113770.0, 1.0, 0.9999994039535522, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + means: [211.5513153076172, 5340427.5, 0.0, 0.0, 307.2554626464844, 0.7098972201347351, 291.65081787109375, 285.33074951171875, 0.2072220742702484, 0.05550207942724228, 0.7196683883666992, 97822.171875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], + stds: [208.08619689941406, 4946647.5, 90.0, 180.0, 532.384521484375, 0.3707307279109955, 8.882925033569336, 13.912480354309082, 3.423595905303955, 3.289386034011841, 0.20914055407047272, 5907.458984375, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0], + + }, + + geometry: [-180, 180, -90, 90], + + dataset: { + dataset_name: 'observations-ea-ofb-0001-1979-2023-combined-surface-v2', + type: 'application/vnd+zarr', + description: 'Observation dataset', + locations: [common.hpc.hpc2020, common.hpc.lumi], + size: '61.5 GB', + inodes: '4711', + roles: ['data'], + }, +} diff --git a/tests/test_cli.py b/tests/test_cli.py new file mode 100644 index 000000000..8e47df184 --- /dev/null +++ b/tests/test_cli.py @@ -0,0 +1,121 @@ +import pathlib + +import pytest + +import weathergen.utils.cli as cli + +DATE_FORMATS = ["2022-12-01T00:00:00", "20221201", "2022-12-01", "12.01.2022"] +EXPECTED_DATE_STR = "202212010000" +MODEL_LOADING_ARGS = ["from_run_id", "mini_epoch", "reuse_run_id"] +GENERAL_ARGS = ["config", "private_config", "options", "run_id"] +MODEL_LOADING_PARSERS = [cli.get_continue_parser(), cli.get_inference_parser()] +BASIC_ARGLIST = ["--from_run_id", "test123"] + + +@pytest.fixture +def inference_parser(): + return cli.get_inference_parser() + + +def test_private_config_is_path(): + argl = ["--private_config", "foo/bar"] + + args = cli.get_train_parser().parse_args(argl) + + assert args.private_config.name == "bar" + + +@pytest.mark.parametrize("files", [["foo/bar"], ["foo/bar", "baz"]]) +def test_config_is_pathes(files): + args = cli.get_train_parser().parse_args(["--config"] + files) + + assert all([isinstance(file, pathlib.Path) for file in args.config]) + + +@pytest.mark.parametrize("overwrites", [["foo=/bar", "baz.foo=1"], ["foo=2"]]) +def test_options(overwrites): + args = cli.get_train_parser().parse_args(["--options"] + overwrites) + + assert all([overwrite in args.options for overwrite in overwrites]) + + +def test_train_general_has_params(): + args = cli.get_train_parser().parse_args([]) + + assert all([arg in 
+
+
+@pytest.mark.parametrize("parser", MODEL_LOADING_PARSERS)
+def test_general_has_params(parser):
+    args = parser.parse_args(BASIC_ARGLIST)
+
+    assert all([arg in vars(args).keys() for arg in GENERAL_ARGS])
+
+
+@pytest.mark.parametrize("parser", MODEL_LOADING_PARSERS)
+def test_model_loading_has_params(parser):
+    args = parser.parse_args(BASIC_ARGLIST)
+
+    assert all([arg in vars(args).keys() for arg in MODEL_LOADING_ARGS])
+
+
+@pytest.mark.parametrize("streams", [["ERA5", "FOO"], ["BAR"]])
+def test_inference_streams_output(inference_parser, streams):
+    arglist = BASIC_ARGLIST + ["--streams_output", *streams]
+    args = inference_parser.parse_args(arglist)
+
+    assert args.streams_output == streams
+
+
+def test_inference_streams_output_empty(inference_parser):
+    arglist = BASIC_ARGLIST + ["--streams_output", *[]]
+
+    with pytest.raises(SystemExit):
+        inference_parser.parse_args(arglist)
+
+
+def test_inference_defaults(inference_parser):
+    default_args = [
+        "start_date",
+        "end_date",
+        "samples",
+        "streams_output",
+        "mini_epoch",
+        "private_config",
+    ]
+    default_values = [inference_parser.get_default(arg) for arg in default_args]
+    # apply custom type
+    default_values[:2] = [cli._format_date(date) for date in default_values[:2]]
+
+    args = inference_parser.parse_args(BASIC_ARGLIST)
+
+    assert all(
+        [
+            getattr(args, arg) == default_value
+            for arg, default_value in zip(default_args, default_values, strict=True)
+        ]
+    )
+
+
+@pytest.mark.parametrize("date", DATE_FORMATS)
+def test_inference_start_date(inference_parser, date):
+    args = inference_parser.parse_args(BASIC_ARGLIST + ["--start_date", date])
+
+    assert args.start_date == EXPECTED_DATE_STR
+
+
+def test_inference_start_date_invalid(inference_parser):
+    with pytest.raises(SystemExit):
+        inference_parser.parse_args(BASIC_ARGLIST + ["--start_date", "foobar"])
+
+
+@pytest.mark.parametrize("date", DATE_FORMATS)
+def test_inference_end_date(inference_parser, date):
+    args = inference_parser.parse_args(BASIC_ARGLIST + ["--end_date", date])
+
+    assert args.end_date == EXPECTED_DATE_STR
+
+
+def test_inference_end_date_invalid(inference_parser):
+    with pytest.raises(SystemExit):
+        inference_parser.parse_args(BASIC_ARGLIST + ["--end_date", "foobar"])
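The date tests above pin down the contract of cli._format_date: every spelling in DATE_FORMATS normalises to the compact YYYYMMDDHHMM form in EXPECTED_DATE_STR, and unparsable input must fail in a way that lets argparse exit. The implementation is not part of this patch; a minimal sketch consistent with those tests (only the name is taken from the test module, the format list and error handling are assumptions) could look like:

from datetime import datetime

def _format_date(value: str) -> str:
    # One strptime pattern per spelling exercised in DATE_FORMATS.
    formats = ("%Y-%m-%dT%H:%M:%S", "%Y%m%d", "%Y-%m-%d", "%m.%d.%Y")
    for fmt in formats:
        try:
            return datetime.strptime(value, fmt).strftime("%Y%m%d%H%M")
        except ValueError:
            continue
    # Used as an argparse type=, this raise becomes the SystemExit that the
    # *_invalid tests expect.
    raise ValueError(f"unrecognised date: {value!r}")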
"num_heads": 4, + # "sampling_rate" : 0.2 + }, + "pred_head": {"ens_size": 1, "num_layers": 1}, + } +} +DUMMY_MULTIPLE_STREAM_CONF = DUMMY_STREAM_CONF | {"FOO": DUMMY_STREAM_CONF["ERA5"]} + +VALID_STREAMS = [ + (pathlib.Path("test.yml"), DUMMY_STREAM_CONF), + (pathlib.Path("foo/test.yml"), DUMMY_STREAM_CONF), + (pathlib.Path("bar/foo/test.yml"), DUMMY_STREAM_CONF), +] + +EXCLUDED_STREAMS = [ + (pathlib.Path(".test.yml"), DUMMY_STREAM_CONF), + (pathlib.Path("#test.yml"), DUMMY_STREAM_CONF), +] + + +def contains_keys(super_config, sub_config): + keys_present = [key in super_config.keys() for key in sub_config.keys()] + + return all(keys_present) + + +def contains_values(super_config, sub_config): + correct_values = [super_config[key] == value for key, value in sub_config.items()] + + return all(correct_values) + + +def contains(super_config, sub_config): + return contains_keys(super_config, sub_config) and contains_values(super_config, sub_config) + + +def is_equal(config1, config2): + return contains(config1, config2) and contains(config2, config1) + + +@pytest.fixture +def shared_working_dir(): + with tempfile.TemporaryDirectory(prefix="models") as temp_dir: + yield temp_dir + + +def write_stream_file(write_path, content: str): + write_path.parent.mkdir(parents=True, exist_ok=True) + with open(write_path, "a") as stream_file: + stream_file.write(content) + + +def get_expected_config(key, config): + config = OmegaConf.create(config) + config.name = key + return config + + +@pytest.fixture +def streams_dir(): + with tempfile.TemporaryDirectory(prefix="streams") as temp_dir: + yield pathlib.Path(temp_dir) + + +@pytest.fixture +def private_conf(shared_working_dir): + cf = OmegaConf.create(DUMMY_PRIVATE_CONF) + cf.path_shared_working_dir = shared_working_dir + cf.path_shared_slurm_dir = shared_working_dir + + return cf + + +@pytest.fixture +def private_config_file(private_conf): + with tempfile.NamedTemporaryFile("w+") as temp: + temp.write(OmegaConf.to_yaml(private_conf)) + temp.flush() + yield pathlib.Path(temp.name) + + +@pytest.fixture +def overwrite_dict(request): + key, value = request.param + return {key: value} + + +@pytest.fixture +def overwrite_config(overwrite_dict): + return OmegaConf.create(overwrite_dict) + + +@pytest.fixture +def overwrite_file(overwrite_config): + # TODO should this be "w+t" instead of "w"? 
+ with tempfile.NamedTemporaryFile("w+") as temp: + temp.write(OmegaConf.to_yaml(overwrite_config)) + temp.flush() + yield pathlib.Path(temp.name) + + +@pytest.fixture +def config_fresh(private_config_file): + cf = config.load_config(private_config_file, None, None) + cf = config.set_run_id(cf, TEST_RUN_ID, False) + cf.data_loader_rng_seed = 42 + + return cf + + +def test_contains_private(config_fresh): + sanitized_private_conf = DUMMY_PRIVATE_CONF.copy() + del sanitized_private_conf["secrets"] + assert contains_keys(config_fresh, sanitized_private_conf) + + +def test_is_paths_set(config_fresh): + paths = {"model_path": "foo", "run_path": "bar"} + + assert contains_keys(config_fresh, paths) + + +@pytest.mark.parametrize("overwrite_dict", DUMMY_OVERWRITES, indirect=True) +def test_load_with_overwrite_dict(overwrite_dict, private_config_file): + cf = config.load_config(private_config_file, None, None, overwrite_dict) + + assert contains(cf, overwrite_dict) + + +@pytest.mark.parametrize("overwrite_dict", DUMMY_OVERWRITES, indirect=True) +def test_load_with_overwrite_config(overwrite_config, private_config_file): + cf = config.load_config(private_config_file, None, None, overwrite_config) + + assert contains(cf, overwrite_config) + + +@pytest.mark.parametrize("overwrite_dict", DUMMY_OVERWRITES, indirect=True) +def test_load_with_overwrite_file(private_config_file, overwrite_file): + sub_cf = OmegaConf.load(overwrite_file) + cf = config.load_config(private_config_file, None, None, overwrite_file) + + assert contains(cf, sub_cf) + + +def test_load_with_stream_in_overwrite(private_config_file, streams_dir, mocker): + overwrite = {"streams_directory": streams_dir} + stub = mocker.patch("weathergen.common.config.load_streams", return_value=streams_dir) + + config.load_config(private_config_file, None, None, overwrite) + + stub.assert_called_once_with(streams_dir) + + +def test_load_multiple_overwrites(private_config_file): + overwrites = [{"foo": 1, "bar": 1, "baz": 1}, {"foo": 2, "bar": 2}, {"foo": 3}] + + expected = {"foo": 3, "bar": 2, "baz": 1} + cf = config.load_config(private_config_file, None, None, *overwrites) + + assert contains(cf, expected) + + +@pytest.mark.parametrize("mini_epoch", [None, 0, 1, 2, -1]) +def test_load_existing_config(mini_epoch, private_config_file, config_fresh): + test_num_mini_epochs = 3000 + + config_fresh.num_mini_epochs = test_num_mini_epochs # some specific change + config.save(config_fresh, mini_epoch) + + cf = config.load_config(private_config_file, config_fresh.run_id, mini_epoch) + + assert cf.num_mini_epochs == test_num_mini_epochs + + +@pytest.mark.parametrize("options,cf", [(["foo=1", "bar=2"], {"foo": 1, "bar": 2}), ([], {})]) +def test_from_cli(options, cf): + parsed_config = config.from_cli_arglist(options) + + assert parsed_config == OmegaConf.create(cf) + + +@pytest.mark.parametrize( + "run_id,reuse,expected", + [ + (None, False, "generated"), + ("new_id", False, "new_id"), + (None, True, TEST_RUN_ID), + ("new_id", True, TEST_RUN_ID), + ], +) +def test_set_run_id(config_fresh, run_id, reuse, expected, mocker): + mocker.patch("weathergen.common.config.get_run_id", return_value="generated") + + config_fresh = config.set_run_id(config_fresh, run_id, reuse) + + assert config_fresh.run_id == expected + + +def test_print_cf_no_secrets(config_fresh): + output = config.format_cf(config_fresh) + + assert "53CR3T" not in output and "secrets" not in config_fresh.keys() + + +@pytest.mark.parametrize("rel_path,cf", VALID_STREAMS) +def 
test_load_streams(streams_dir, rel_path, cf): + expected = get_expected_config(*[*cf.items()][0]) + write_stream_file(streams_dir / rel_path, OmegaConf.to_yaml(cf)) + + streams = config.load_streams(streams_dir) + + assert all(is_equal(stream, expected) for stream in streams) + + +@pytest.mark.parametrize("rel_path,cf", EXCLUDED_STREAMS) +def test_load_streams_exclude_files(streams_dir, rel_path, cf): + write_stream_file(streams_dir / rel_path, OmegaConf.to_yaml(cf)) + + streams = config.load_streams(streams_dir) + + assert streams == [] + + +def test_load_empty_stream(streams_dir): + write_stream_file(streams_dir / "empty.yml", "") + + streams = config.load_streams(streams_dir) + assert streams == [] + + +def test_load_malformed_stream(streams_dir): + write_stream_file(streams_dir / "error.yml", "ae:{") + + with pytest.raises(ValueError): + config.load_streams(streams_dir) + + +@pytest.mark.parametrize("rel_path,cf", [("test.yml", DUMMY_MULTIPLE_STREAM_CONF)]) +def test_load_multiple_streams_len(streams_dir, rel_path, cf): + write_stream_file(streams_dir / rel_path, OmegaConf.to_yaml(cf)) + + streams = config.load_streams(streams_dir) + + assert len(streams) == len(cf) + + +@pytest.mark.parametrize("rel_path,cf", [("test.yml", DUMMY_MULTIPLE_STREAM_CONF)]) +def test_load_multiple_streams_content(streams_dir, rel_path, cf): + expected = [get_expected_config(name, conf) for name, conf in cf.items()] + write_stream_file(streams_dir / rel_path, OmegaConf.to_yaml(cf)) + + streams = config.load_streams(streams_dir) + + assert all( + is_equal(stream, stream_e) for stream, stream_e in zip(streams, expected, strict=True) + ) + + +def test_load_duplicate_streams(streams_dir): + write_stream_file(streams_dir / "foo.yml", OmegaConf.to_yaml(DUMMY_STREAM_CONF)) + write_stream_file(streams_dir / "bar.yml", OmegaConf.to_yaml(DUMMY_STREAM_CONF)) + + with pytest.raises(ValueError): + config.load_streams(streams_dir) + + +def test_load_duplicate_streams_same_file(streams_dir): + write_stream_file(streams_dir / "foo.yml", OmegaConf.to_yaml(DUMMY_STREAM_CONF)) + write_stream_file(streams_dir / "foo.yml", OmegaConf.to_yaml(DUMMY_STREAM_CONF)) + + with pytest.raises(ValueError): + config.load_streams(streams_dir) + + +@pytest.mark.parametrize("mini_epoch", [None, 0, 1, 2, -1]) # maybe add -5 as test case +def test_save(mini_epoch, config_fresh): + config.save(config_fresh, mini_epoch) + + cf = config.load_model_config(config_fresh.run_id, mini_epoch, config_fresh.model_path) + assert is_equal(cf, config_fresh) diff --git a/uv.lock b/uv.lock new file mode 100644 index 000000000..4cdcbdcc5 --- /dev/null +++ b/uv.lock @@ -0,0 +1,3081 @@ +version = 1 +revision = 3 +requires-python = "==3.12.*" +resolution-markers = [ + "platform_machine == 'aarch64' and sys_platform == 'linux'", + "platform_machine == 'x86_64' and sys_platform == 'linux'", +] +supported-markers = [ + "platform_machine == 'aarch64' and sys_platform == 'linux'", + "platform_machine == 'x86_64' and sys_platform == 'linux'", +] +conflicts = [[ + { package = "weathergen", extra = "cpu" }, + { package = "weathergen", extra = "gpu" }, +]] + +[manifest] +members = [ + "weathergen", + "weathergen-common", + "weathergen-evaluate", + "weathergen-metrics", + "weathergen-readers-extra", +] + +[[package]] +name = "absl-py" +version = "2.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/10/2a/c93173ffa1b39c1d0395b7e842bbdc62e556ca9d8d3b5572926f3e4ca752/absl_py-2.3.1.tar.gz", hash = 
"sha256:a97820526f7fbfd2ec1bce83f3f25e3a14840dac0d8e02a0b71cd75db3f77fc9", size = 116588, upload-time = "2025-07-03T09:31:44.05Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/aa/ba0014cc4659328dc818a28827be78e6d97312ab0cb98105a770924dc11e/absl_py-2.3.1-py3-none-any.whl", hash = "sha256:eeecf07f0c2a93ace0772c92e596ace6d3d3996c042b2128459aaae2a76de11d", size = 135811, upload-time = "2025-07-03T09:31:42.253Z" }, +] + +[[package]] +name = "anemoi-datasets" +version = "0.5.24" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anemoi-transform", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "anemoi-utils", extra = ["provenance"], marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "cfunits", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numcodecs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "semantic-version", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tqdm", marker = "(platform_machine != 'aarch64' 
and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "zarr", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/67/b2/f0d2dd03a00414407c2840d5fac43e4f29a2fc97f1a5a712df9f7eb933dc/anemoi_datasets-0.5.24.tar.gz", hash = "sha256:a29e051bdc02522a47459ce18b7d637ec1b7323634ccf255557ccf1a49005215", size = 1766949, upload-time = "2025-05-23T11:20:21.986Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/68/5a7cdd7db91f1e94347277a5f1431295d1a6cf9c267ce16f34edf7b32fbb/anemoi_datasets-0.5.24-py3-none-any.whl", hash = "sha256:7f414955ea39ce964dbb3ab7f90a7bb626468e19bc3249377dbd18447a56106a", size = 295020, upload-time = "2025-05-23T11:20:20.756Z" }, +] + +[[package]] +name = "anemoi-transform" +version = "0.1.11" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anemoi-utils", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "cfunits", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "earthkit-data", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "earthkit-meteo", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "earthkit-regrid", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 
'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/e5/32c3a4d15b3f04e364518eb7082aec6d2b5eca144adefedbc597eaebf0ca/anemoi_transform-0.1.11.tar.gz", hash = "sha256:8d6d60bbc82884ebeac91593c1aff1a017faa3606335455aecbb229af212ce60", size = 105541, upload-time = "2025-05-28T10:17:52.154Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/d0/7d2b30ae8d72ee471b9cf6e67f352ef92869a1a98169ee4bd7ebe732fa6e/anemoi_transform-0.1.11-py3-none-any.whl", hash = "sha256:79e6ad9c4bc9fffb82dec86e72da44085ab7ab7dfbb608521e8530e6f7a7e540", size = 67550, upload-time = "2025-05-28T10:17:50.794Z" }, +] + +[[package]] +name = "anemoi-utils" +version = "0.4.24" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "aniso8601", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "multiurl", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pydantic", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "python-dateutil", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tqdm", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or 
(sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a9/da/208276e1a4af5a2230c6aa45fa1126ea6ec51f33de4e2e886f4f90dfa18b/anemoi_utils-0.4.24.tar.gz", hash = "sha256:ffae3b791fe3d46da28a46441aa4d7aa5ff2e081e78054245dd8167b23d867dd", size = 125476, upload-time = "2025-06-06T07:38:09.63Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/9b/306a1f8358ecc589b573c75df4a1cf80042be7f991fa2cd0f07337135c44/anemoi_utils-0.4.24-py3-none-any.whl", hash = "sha256:88616199ae0dd0e50e7e9155625dee32784e5de7c6a1223570b3e0f6128a5343", size = 78937, upload-time = "2025-06-06T07:38:07.755Z" }, +] + +[package.optional-dependencies] +provenance = [ + { name = "gitpython", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "nvsmi", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[[package]] +name = "aniso8601" +version = "10.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8b/8d/52179c4e3f1978d3d9a285f98c706642522750ef343e9738286130423730/aniso8601-10.0.1.tar.gz", hash = "sha256:25488f8663dd1528ae1f54f94ac1ea51ae25b4d531539b8bc707fed184d16845", size = 47190, upload-time = "2025-04-18T17:29:42.995Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/59/75/e0e10dc7ed1408c28e03a6cb2d7a407f99320eb953f229d008a7a6d05546/aniso8601-10.0.1-py2.py3-none-any.whl", hash = "sha256:eb19717fd4e0db6de1aab06f12450ab92144246b257423fe020af5748c0cb89e", size = 52848, upload-time = "2025-04-18T17:29:41.492Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "antlr4-python3-runtime" +version = "4.9.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d19459005ca000b6e7012f2f1ca597746cbcd1fbfe5e/antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b", size = 117034, upload-time = "2021-11-06T17:52:23.524Z" } + +[[package]] +name = "anyio" +version = "4.11.0" +source = { registry = "https://pypi.org/simple" } 
+dependencies = [ + { name = "idna", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "sniffio", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/78/7d432127c41b50bccba979505f272c16cbcadcc33645d5fa3a738110ae75/anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4", size = 219094, upload-time = "2025-09-23T09:19:12.58Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" }, +] + +[[package]] +name = "array-api-compat" +version = "1.12.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8d/bd/9fa5c7c5621698d5632cc852a79fbbdc28024462c9396698e5fdcb395f37/array_api_compat-1.12.0.tar.gz", hash = "sha256:585bc615f650de53ac24b7c012baecfcdd810f50df3573be47e6dd9fa20df974", size = 99883, upload-time = "2025-05-16T08:49:59.897Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/b1/0542e0cab6f49f151a2d7a42400f84f706fc0b64e85dc1f56708b2e9fd37/array_api_compat-1.12.0-py3-none-any.whl", hash = "sha256:a0b4795b6944a9507fde54679f9350e2ad2b1e2acf4a2408a098cdc27f890a8b", size = 58156, upload-time = "2025-05-16T08:49:58.129Z" }, +] + +[[package]] +name = "asciitree" +version = "0.3.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/6a/885bc91484e1aa8f618f6f0228d76d0e67000b0fdd6090673b777e311913/asciitree-0.3.3.tar.gz", hash = "sha256:4aa4b9b649f85e3fcb343363d97564aa1fb62e249677f2e18a96765145cc0f6e", size = 3951, upload-time = "2016-09-05T19:10:42.681Z" } + +[[package]] +name = "astropy" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astropy-iers-data", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = 
"(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyerfa", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/83/91/124d020cea78e4e4b6db7ff726c2c2e4a5865293d0a4355d13b0312d99f1/astropy-7.1.0.tar.gz", hash = "sha256:c8f254322295b1b8cf24303d6f155bf7efdb6c1282882b966ce3040eff8c53c5", size = 6976116, upload-time = "2025-05-20T13:40:10.557Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cf/9a/ed2b35b55e28a6317471b61456d2feda7798b2dd3601e17859620e8eae4c/astropy-7.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e0fec2f4b5265caab68020eaa320704e7ce9433ae8dbea75c300468fed695437", size = 6381273, upload-time = "2025-05-20T13:39:45.481Z" }, + { url = "https://files.pythonhosted.org/packages/5c/45/333bc1072f3b2ac31aec33063bb7122661405a97cb7fec702e95af707bd4/astropy-7.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e4bb022f863cf13eefeb406692f58824c0d9bdb1aa36ae786e87c096d8ebdd07", size = 6301716, upload-time = "2025-05-20T13:39:47.339Z" }, + { url = "https://files.pythonhosted.org/packages/58/90/bfb7a1b5d9e3401967e351cf31add576cddf7466d2030cc6f4d1d841a18d/astropy-7.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c811a4aedd8fbf6d7611d64d40af1f0c1c1e6621e5992e7e1a7b5fec47dc1fa1", size = 10096600, upload-time = "2025-05-20T13:39:49.169Z" }, + { url = "https://files.pythonhosted.org/packages/fb/69/a34f20db7146912f25e2487c5283f1ae2aed5d24f615fa976439ece35f7e/astropy-7.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:654261df547c150e5b6a022f3785f47a2e547e0cc1c06fbcf6293b5f4e85722a", size = 10160320, upload-time = "2025-05-20T13:39:51.079Z" }, + { url = "https://files.pythonhosted.org/packages/7a/be/0b874f551acbac27ff3d5d73bdf3c0860a8b78c6db574b19e6d0b7d363e4/astropy-7.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6bd25761ba1385bb99a189401fbc486e0884d97129e271b655b6efa956a12a77", size = 10137307, upload-time = "2025-05-20T13:39:53.175Z" }, + { url = 
"https://files.pythonhosted.org/packages/ea/1c/132241ab2006a52b4809ee82a71fde793c0159916ae45dae374b0412b037/astropy-7.1.0-cp312-cp312-win32.whl", hash = "sha256:f637e39622b23750a12b19ab4642f2e3970f6cb84f2228587725f15bf1d80d03", size = 6152433, upload-time = "2025-05-20T13:39:54.933Z" }, + { url = "https://files.pythonhosted.org/packages/11/39/0a38241008905a9c1048f7b351310deee71cf42e0de1f929e84a10878864/astropy-7.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:06ba650db237557912fdd7bb8ffdd87838e23e58b0fa0001b4d43c2bb5a79412", size = 6279663, upload-time = "2025-05-20T13:39:56.274Z" }, +] + +[[package]] +name = "astropy-healpix" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astropy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/8a/dfd71e4db13706383ca2cea0b2831eb10da2c286d4494f183f80fc03cbc2/astropy_healpix-1.1.2.tar.gz", hash = "sha256:03671df12a36ec3b357c244d5154b6786362ff5d80770675c7b24815101066e4", size = 109453, upload-time = "2025-02-19T19:38:06.814Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/be/0caad18d7077c2547c1225bfff3edb691e03495b8952541d000cf40234b7/astropy_healpix-1.1.2-cp310-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fb504c998e1661215c74da9537558cd2048d29b44acb2d63e613aae133b91668", size = 85424, upload-time = "2025-02-19T19:37:53.808Z" }, + { url = "https://files.pythonhosted.org/packages/8d/38/eb2897df8b0cba6d8dd0d08571fa7f2277002a46feb4c97fa121f3878c30/astropy_healpix-1.1.2-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:00a0c9378d7e844aecb23d62c206a999e045a48781a320ac5f012f8c95ac4022", size = 81721, upload-time = "2025-02-19T19:37:55.72Z" }, + { url = "https://files.pythonhosted.org/packages/d9/40/0382c85a995008ba8e2f99e28d143cfe9f0a835e3a2088c36a4947c93420/astropy_healpix-1.1.2-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee37b14700d28cf53e2376c65f8cb6224a59f80067feb3f3cd6dd6f9a4577337", size = 191974, upload-time = "2025-02-19T19:37:56.791Z" }, + { url = "https://files.pythonhosted.org/packages/31/01/b5d91f29f36ab9fb220ef3e820dd3046f9f671fc1ec99644369dc606840b/astropy_healpix-1.1.2-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf7a616af0b7df9c1d96f6af1e12382f29bd43e3fb88ce98f46992bfa23a149e", size = 195201, upload-time = "2025-02-19T19:38:00.034Z" }, + { url = "https://files.pythonhosted.org/packages/39/51/54770150df54c09b06c00f24481317abcb4b6478a99772a5df399894306d/astropy_healpix-1.1.2-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3526008fc5ccd4c13f3166a878bfb856b909a00912b27d26666992615c668e88", size = 191335, upload-time = "2025-02-19T19:38:02.038Z" }, + { url = 
"https://files.pythonhosted.org/packages/da/5b/6bafcee285e9ef653449742e6d0ba6974e97cbffccac99727d8606e610b6/astropy_healpix-1.1.2-cp310-abi3-win32.whl", hash = "sha256:94f4a2fcee2e66ab68f8face8d20be4553cbf6ce81bd214052ddf307e2118513", size = 52693, upload-time = "2025-02-19T19:38:04.662Z" }, + { url = "https://files.pythonhosted.org/packages/01/6d/07a6dcd87aec162b9cb63167ccb3919a4f6ee739ce32035cd44887ae8708/astropy_healpix-1.1.2-cp310-abi3-win_amd64.whl", hash = "sha256:f6b3e50c49e73a66bb1847dc3451e1d22bf828c10881275bf359928e95d25fe3", size = 54830, upload-time = "2025-02-19T19:38:05.709Z" }, +] + +[[package]] +name = "astropy-iers-data" +version = "0.2025.6.16.0.38.47" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/ae/efd931d47653f474afb1154d9181f885bf3f82854dcdd979a9e2838470c5/astropy_iers_data-0.2025.6.16.0.38.47.tar.gz", hash = "sha256:58c984976506b8a5c1c969aafe8744b286111d98c8623e66aecc45db9e777121", size = 1899704, upload-time = "2025-06-16T00:39:20.529Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/3c/35/e4b67e87d7ff182b59047db09dce11c7154273eee428c98fe0828adbf3d7/astropy_iers_data-0.2025.6.16.0.38.47-py3-none-any.whl", hash = "sha256:c8c2c999bb9fa987c89c737bc7e71d83f8a6c8fd874ef03b0adc196c1432e20e", size = 1955135, upload-time = "2025-06-16T00:39:18.731Z" }, +] + +[[package]] +name = "asttokens" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4a/e7/82da0a03e7ba5141f05cce0d302e6eed121ae055e0456ca228bf693984bc/asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7", size = 61978, upload-time = "2024-11-30T04:30:14.439Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/25/8a/c46dcc25341b5bce5472c718902eb3d38600a903b14fa6aeecef3f21a46f/asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2", size = 26918, upload-time = "2024-11-30T04:30:10.946Z" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + +[[package]] +name = "bleach" +version = "6.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "webencodings", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/9a/0e33f5054c54d349ea62c277191c020c2d6ef1d65ab2cb1993f91ec846d1/bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f", size = 203083, 
upload-time = "2024-10-29T18:30:40.477Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/55/96142937f66150805c25c4d0f31ee4132fd33497753400734f9dfdcbdc66/bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e", size = 163406, upload-time = "2024-10-29T18:30:38.186Z" }, +] + +[[package]] +name = "bokeh" +version = "3.7.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "jinja2", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "narwhals", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pandas", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pillow", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' 
and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tornado", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "xyzservices", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/75/18/12d0d6024177ad18ba65deffc363046d0cbafe116f8b964a9efa85d2800f/bokeh-3.7.3.tar.gz", hash = "sha256:70a89a9f797b103d5ee6ad15fb7944adda115cf0da996ed0b75cfba61cb12f2b", size = 6366610, upload-time = "2025-05-12T12:13:29.318Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/91/48/08b2382e739236aa3360b7976360ba3e0c043b6234e25951c18c1eb6fa06/bokeh-3.7.3-py3-none-any.whl", hash = "sha256:b0e79dd737f088865212e4fdcb0f3b95d087f0f088bf8ca186a300ab1641e2c7", size = 7031447, upload-time = "2025-05-12T12:13:27.47Z" }, +] + +[[package]] +name = "cachetools" +version = "6.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cc/7e/b975b5814bd36faf009faebe22c1072a1fa1168db34d285ef0ba071ad78c/cachetools-6.2.1.tar.gz", hash = "sha256:3f391e4bd8f8bf0931169baf7456cc822705f4e2a31f840d218f445b9a854201", size = 31325, upload-time = "2025-10-12T14:55:30.139Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/96/c5/1e741d26306c42e2bf6ab740b2202872727e0f606033c9dd713f8b93f5a8/cachetools-6.2.1-py3-none-any.whl", hash = "sha256:09868944b6dde876dfd44e1d47e18484541eaf12f26f29b7af91b26cc892d701", size = 11280, upload-time = "2025-10-12T14:55:28.382Z" }, +] + +[[package]] +name = "cartopy" +version = "0.24.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "matplotlib", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and 
sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyproj", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyshp", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "shapely", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e0/75/94aff4fef338887641aa780d13795609861e6e9f9593bd66d4917ab7954b/cartopy-0.24.1.tar.gz", hash = "sha256:01c910d5634c69a7efdec46e0a17d473d2328767f001d4dc0b5c4b48e585c8bd", size = 10741277, upload-time = "2024-10-08T23:25:35.148Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/76/774a4f808c6a4fc19b87c2cc38dd8731d413aad606689451c017ff93ad12/Cartopy-0.24.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a984e33977daed8f760c09c331c8368a6af060db1190af89d74a027c272e39c3", size = 10983939, upload-time = "2024-10-08T23:25:11.814Z" }, + { url = "https://files.pythonhosted.org/packages/2f/48/8517d5d1cc56ce5c4abda1de6454593474a23412115a543f7981aa7e4377/Cartopy-0.24.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:71d8a6d061d0764aba3baf357a68f3d73796a8a46d34b8c9fb241171b273c69e", size = 10972374, upload-time = "2024-10-08T23:25:15.009Z" }, + { url = "https://files.pythonhosted.org/packages/c8/84/cb1577d5ac2f0deb002001c6e25b291735151c8c3033c97f212dc482ef72/Cartopy-0.24.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f354a1d902a8d6ee33b099acc86ac2e1af528bbc0ea718b834111c97e604981", size = 11715215, upload-time = "2024-10-08T23:25:18.447Z" }, + { url = "https://files.pythonhosted.org/packages/11/95/40c7abae8789aae22ad2a5da3974d3270dc3526b46cee253f680f72ee6cc/Cartopy-0.24.1-cp312-cp312-win_amd64.whl", hash = "sha256:b1bb2d02b31884ee1d4f14e5b436bbf95745eac39c6fc0d6c67c83bb907b55b3", size = 10959875, upload-time = "2024-10-08T23:25:21.515Z" }, +] + +[[package]] +name = "certifi" +version = "2025.6.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = 
"sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, 
upload-time = "2024-09-04T20:44:23.245Z" }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, +] + +[[package]] +name = "cfgrib" +version = "0.9.15.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "click", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "eccodes", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3e/3b/0ccbbc67866a4a2df570d6bf0f53d6d22220c44e1f3684455b5eae298936/cfgrib-0.9.15.0.tar.gz", hash = "sha256:d455034e19b9560a75d008ba9d09b2d4e65762adfb2e911f28b841f4b9c6b47f", size = 6511752, upload-time = "2024-12-18T11:10:17.728Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/d7/96b4209c99f1fd6c19f502cebe8c91983c23331c380f3f521250f268ae8c/cfgrib-0.9.15.0-py3-none-any.whl", hash = "sha256:469cfd25dc173863795e596263b3b6b5ea1402b1715f2b7b1d4b995b40b32c18", size = 48908, upload-time = "2024-12-18T11:10:13.381Z" }, +] + +[[package]] +name = "cftime" +version = "1.6.4.post1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 
'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ab/c8/1155d1d58003105307c7e5985f422ae5bcb2ca0cbc553cc828f3c5a934a7/cftime-1.6.4.post1.tar.gz", hash = "sha256:50ac76cc9f10ab7bd46e44a71c51a6927051b499b4407df4f29ab13d741b942f", size = 54631, upload-time = "2024-10-22T18:48:34.194Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/50/81/0bb28d54088a61592f61a11e7fcabcea6d261c47af79e18d0f9cbcd940ae/cftime-1.6.4.post1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a590f73506f4704ba5e154ef55bfbaed5e1b4ac170f3caeb8c58e4f2c619ee4e", size = 226615, upload-time = "2024-10-22T18:47:59.575Z" }, + { url = "https://files.pythonhosted.org/packages/f3/1e/38dbbf8a828dfb5e0e6e5c912818b77aacf2e7bcb97b262ac6126beeb29f/cftime-1.6.4.post1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:933cb10e1af4e362e77f513e3eb92b34a688729ddbf938bbdfa5ac20a7f44ba0", size = 209193, upload-time = "2024-10-22T18:48:00.767Z" }, + { url = "https://files.pythonhosted.org/packages/9b/60/0db884c76311ecaaf31f628aa9358beae5fcb0fbbdc2eb0b790a93aa258f/cftime-1.6.4.post1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cf17a1b36f62e9e73c4c9363dd811e1bbf1170f5ac26d343fb26012ccf482908", size = 1320215, upload-time = "2024-10-22T18:48:02.275Z" }, + { url = "https://files.pythonhosted.org/packages/8d/7d/2d5fc7af06da4f3bdea59a204f741bf7a30bc5019355991b2f083e557e4e/cftime-1.6.4.post1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e18021f421aa26527bad8688c1acf0c85fa72730beb6efce969c316743294f2", size = 1367426, upload-time = "2024-10-22T18:48:03.57Z" }, + { url = "https://files.pythonhosted.org/packages/5d/ab/e8b26d05323fc5629356c82a7f64026248f121ea1361b49df441bbc8f2d7/cftime-1.6.4.post1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5835b9d622f9304d1c23a35603a0f068739f428d902860f25e6e7e5a1b7cd8ea", size = 1385593, upload-time = "2024-10-22T18:48:04.918Z" }, + { url = "https://files.pythonhosted.org/packages/af/7b/ca72a075a3f660315b031d62d39a3e9cfef71f7929da2621d5120077a75f/cftime-1.6.4.post1-cp312-cp312-win_amd64.whl", hash = "sha256:7f50bf0d1b664924aaee636eb2933746b942417d1f8b82ab6c1f6e8ba0da6885", size = 178918, upload-time = "2024-10-22T18:48:06.195Z" }, +] + +[[package]] +name = "cfunits" +version = "3.3.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cftime", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' 
and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/94/33/6ab78579a3c3be1f7ada33b7d4b8c4db9c00bc93ba77c8913d3090d3bf24/cfunits-3.3.7.tar.gz", hash = "sha256:e271ae2c01765cc437e7eefd802846894367179e3208653ab9b520ade48286d6", size = 42692, upload-time = "2024-04-23T07:54:45.384Z" } + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, + { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, + { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, + { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, + { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, + { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, + { url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, + { url = 
"https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, + { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, + { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, + { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, +] + +[[package]] +name = "cloudpickle" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/39/069100b84d7418bc358d81669d5748efb14b9cceacd2f9c75f550424132f/cloudpickle-3.1.1.tar.gz", hash = "sha256:b216fa8ae4019d5482a8ac3c95d8f6346115d8835911fd4aefd1a445e4242c64", size = 22113, upload-time = "2025-01-14T17:02:05.085Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/e8/64c37fadfc2816a7701fa8a6ed8d87327c7d54eacfbfb6edab14a2f2be75/cloudpickle-3.1.1-py3-none-any.whl", hash = "sha256:c8c5a44295039331ee9dad40ba100a9c7297b6f988e50e87ccdf3765a668350e", size = 
20992, upload-time = "2025-01-14T17:02:02.417Z" }, +] + +[[package]] +name = "comm" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4c/13/7d740c5849255756bc17888787313b61fd38a0a8304fc4f073dfc46122aa/comm-0.2.3.tar.gz", hash = "sha256:2dc8048c10962d55d7ad693be1e7045d891b7ce8d999c97963a5e3e99c055971", size = 6319, upload-time = "2025-07-25T14:02:04.452Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/60/97/891a0971e1e4a8c5d2b20bbe0e524dc04548d2307fee33cdeba148fd4fc7/comm-0.2.3-py3-none-any.whl", hash = "sha256:c615d91d75f7f04f095b30d1c1711babd43bdc6419c1be9886a85f2f4e489417", size = 7294, upload-time = "2025-07-25T14:02:02.896Z" }, +] + +[[package]] +name = "contourpy" +version = "1.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/54/eb9bfc647b19f2009dd5c7f5ec51c4e6ca831725f1aea7a993034f483147/contourpy-1.3.2.tar.gz", hash = "sha256:b6945942715a034c671b7fc54f9588126b0b8bf23db2696e3ca8328f3ff0ab54", size = 13466130, upload-time = "2025-04-15T17:47:53.79Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/34/f7/44785876384eff370c251d58fd65f6ad7f39adce4a093c934d4a67a7c6b6/contourpy-1.3.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4caf2bcd2969402bf77edc4cb6034c7dd7c0803213b3523f111eb7460a51b8d2", size = 271580, upload-time = "2025-04-15T17:37:03.105Z" }, + { url = "https://files.pythonhosted.org/packages/93/3b/0004767622a9826ea3d95f0e9d98cd8729015768075d61f9fea8eeca42a8/contourpy-1.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:82199cb78276249796419fe36b7386bd8d2cc3f28b3bc19fe2454fe2e26c4c15", size = 255530, upload-time = "2025-04-15T17:37:07.026Z" }, + { url = "https://files.pythonhosted.org/packages/e7/bb/7bd49e1f4fa805772d9fd130e0d375554ebc771ed7172f48dfcd4ca61549/contourpy-1.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:106fab697af11456fcba3e352ad50effe493a90f893fca6c2ca5c033820cea92", size = 307688, upload-time = "2025-04-15T17:37:11.481Z" }, + { url = "https://files.pythonhosted.org/packages/fc/97/e1d5dbbfa170725ef78357a9a0edc996b09ae4af170927ba8ce977e60a5f/contourpy-1.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d14f12932a8d620e307f715857107b1d1845cc44fdb5da2bc8e850f5ceba9f87", size = 347331, upload-time = "2025-04-15T17:37:18.212Z" }, + { url = "https://files.pythonhosted.org/packages/6f/66/e69e6e904f5ecf6901be3dd16e7e54d41b6ec6ae3405a535286d4418ffb4/contourpy-1.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:532fd26e715560721bb0d5fc7610fce279b3699b018600ab999d1be895b09415", size = 318963, upload-time = "2025-04-15T17:37:22.76Z" }, + { url = "https://files.pythonhosted.org/packages/a8/32/b8a1c8965e4f72482ff2d1ac2cd670ce0b542f203c8e1d34e7c3e6925da7/contourpy-1.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f26b383144cf2d2c29f01a1e8170f50dacf0eac02d64139dcd709a8ac4eb3cfe", size = 323681, upload-time = "2025-04-15T17:37:33.001Z" }, + { url 
= "https://files.pythonhosted.org/packages/30/c6/12a7e6811d08757c7162a541ca4c5c6a34c0f4e98ef2b338791093518e40/contourpy-1.3.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c49f73e61f1f774650a55d221803b101d966ca0c5a2d6d5e4320ec3997489441", size = 1308674, upload-time = "2025-04-15T17:37:48.64Z" }, + { url = "https://files.pythonhosted.org/packages/2a/8a/bebe5a3f68b484d3a2b8ffaf84704b3e343ef1addea528132ef148e22b3b/contourpy-1.3.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3d80b2c0300583228ac98d0a927a1ba6a2ba6b8a742463c564f1d419ee5b211e", size = 1380480, upload-time = "2025-04-15T17:38:06.7Z" }, + { url = "https://files.pythonhosted.org/packages/34/db/fcd325f19b5978fb509a7d55e06d99f5f856294c1991097534360b307cf1/contourpy-1.3.2-cp312-cp312-win32.whl", hash = "sha256:90df94c89a91b7362e1142cbee7568f86514412ab8a2c0d0fca72d7e91b62912", size = 178489, upload-time = "2025-04-15T17:38:10.338Z" }, + { url = "https://files.pythonhosted.org/packages/01/c8/fadd0b92ffa7b5eb5949bf340a63a4a496a6930a6c37a7ba0f12acb076d6/contourpy-1.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:8c942a01d9163e2e5cfb05cb66110121b8d07ad438a17f9e766317bcb62abf73", size = 223042, upload-time = "2025-04-15T17:38:14.239Z" }, +] + +[[package]] +name = "cycler" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615, upload-time = "2023-10-07T05:32:18.335Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = "sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321, upload-time = "2023-10-07T05:32:16.783Z" }, +] + +[[package]] +name = "dask" +version = "2025.5.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "cloudpickle", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "fsspec", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and 
sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "partd", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "toolz", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3d/29/05feb8e2531c46d763547c66b7f5deb39b53d99b3be1b4ddddbd1cec6567/dask-2025.5.1.tar.gz", hash = "sha256:979d9536549de0e463f4cab8a8c66c3a2ef55791cd740d07d9bf58fab1d1076a", size = 10969324, upload-time = "2025-05-20T19:54:30.688Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/30/53b0844a7a4c6b041b111b24ca15cc9b8661a86fe1f6aaeb2d0d7f0fb1f2/dask-2025.5.1-py3-none-any.whl", hash = "sha256:3b85fdaa5f6f989dde49da6008415b1ae996985ebdfb1e40de2c997d9010371d", size = 1474226, upload-time = "2025-05-20T19:54:20.309Z" }, +] + +[package.optional-dependencies] +array = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[[package]] +name = "databricks-sdk" +version = "0.69.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "protobuf", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "requests", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') 
or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/ba/1dc248e4cc646a1a29504bcbb910bfb28d3affe58063df622e7e3c5c0634/databricks_sdk-0.69.0.tar.gz", hash = "sha256:5ad7514325d941afe47da4cf8748ba9f7da7250977666c519f534c9f6298d2f5", size = 794676, upload-time = "2025-10-20T11:38:15.004Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/73/6f82f2a926a2129f9a08ba550b3f5c837d23156082c8d1f4226801168456/databricks_sdk-0.69.0-py3-none-any.whl", hash = "sha256:f75c37c0da2126d9fec31cefd7b5c5491a7c8b5d62481cd661d3e9f1efec0b1f", size = 749754, upload-time = "2025-10-20T11:38:13.451Z" }, +] + +[[package]] +name = "debugpy" +version = "1.8.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/8b/3a9a28ddb750a76eaec445c7f4d3147ea2c579a97dbd9e25d39001b92b21/debugpy-1.8.15.tar.gz", hash = "sha256:58d7a20b7773ab5ee6bdfb2e6cf622fdf1e40c9d5aef2857d85391526719ac00", size = 1643279, upload-time = "2025-07-15T16:43:29.135Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/4a/4508d256e52897f5cdfee6a6d7580974811e911c6d01321df3264508a5ac/debugpy-1.8.15-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:3dcc7225cb317469721ab5136cda9ff9c8b6e6fb43e87c9e15d5b108b99d01ba", size = 2511197, upload-time = "2025-07-15T16:43:42.343Z" }, + { url = "https://files.pythonhosted.org/packages/99/8d/7f6ef1097e7fecf26b4ef72338d08e41644a41b7ee958a19f494ffcffc29/debugpy-1.8.15-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:047a493ca93c85ccede1dbbaf4e66816794bdc214213dde41a9a61e42d27f8fc", size = 4229517, upload-time = "2025-07-15T16:43:44.14Z" }, + { url = "https://files.pythonhosted.org/packages/3f/e8/e8c6a9aa33a9c9c6dacbf31747384f6ed2adde4de2e9693c766bdf323aa3/debugpy-1.8.15-cp312-cp312-win32.whl", hash = "sha256:b08e9b0bc260cf324c890626961dad4ffd973f7568fbf57feb3c3a65ab6b6327", size = 5276132, upload-time = "2025-07-15T16:43:45.529Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ad/231050c6177b3476b85fcea01e565dac83607b5233d003ff067e2ee44d8f/debugpy-1.8.15-cp312-cp312-win_amd64.whl", hash = "sha256:e2a4fe357c92334272eb2845fcfcdbec3ef9f22c16cf613c388ac0887aed15fa", size = 5317645, upload-time = "2025-07-15T16:43:46.968Z" }, + { url = "https://files.pythonhosted.org/packages/07/d5/98748d9860e767a1248b5e31ffa7ce8cb7006e97bf8abbf3d891d0a8ba4e/debugpy-1.8.15-py2.py3-none-any.whl", hash = "sha256:bce2e6c5ff4f2e00b98d45e7e01a49c7b489ff6df5f12d881c67d2f1ac635f3d", size = 5282697, upload-time = "2025-07-15T16:44:07.996Z" }, +] + +[[package]] +name = "decorator" +version = "5.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/43/fa/6d96a0978d19e17b68d634497769987b16c8f4cd0a7a05048bec693caa6b/decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360", size = 56711, upload-time = "2025-02-24T04:41:34.073Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" }, +] + 
+[[package]] +name = "deprecated" +version = "1.2.18" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wrapt", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/97/06afe62762c9a8a86af0cfb7bfdab22a43ad17138b07af5b1a58442690a2/deprecated-1.2.18.tar.gz", hash = "sha256:422b6f6d859da6f2ef57857761bfb392480502a64c3028ca9bbe86085d72115d", size = 2928744, upload-time = "2025-01-27T10:46:25.7Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/c6/ac0b6c1e2d138f1002bcf799d330bd6d85084fece321e662a14223794041/Deprecated-1.2.18-py2.py3-none-any.whl", hash = "sha256:bd5011788200372a32418f888e326a09ff80d0214bd961147cfed01b5c018eec", size = 9998, upload-time = "2025-01-27T10:46:09.186Z" }, +] + +[[package]] +name = "deprecation" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5a/d3/8ae2869247df154b64c1884d7346d412fed0c49df84db635aab2d1c40e62/deprecation-2.1.0.tar.gz", hash = "sha256:72b3bde64e5d778694b0cf68178aed03d15e15477116add3fb773e581f9518ff", size = 173788, upload-time = "2020-04-20T14:23:38.738Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/02/c3/253a89ee03fc9b9682f1541728eb66db7db22148cd94f89ab22528cd1e1b/deprecation-2.1.0-py2.py3-none-any.whl", hash = "sha256:a10811591210e1fb0e768a8c25517cabeabcba6f0bf96564f8ff45189f90b14a", size = 11178, upload-time = "2020-04-20T14:23:36.581Z" }, +] + +[[package]] +name = "earthkit-data" +version = "0.14.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cfgrib", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "dask", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "deprecation", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform 
!= 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "earthkit-meteo", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "earthkit-utils", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "eccodes", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "entrypoints", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "filelock", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "jinja2", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "jsonschema", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "lru-dict", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "markdown", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 
'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "multiurl", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "netcdf4", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pandas", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pdbufr", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tqdm", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "xarray", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/43/99/d41bf77e4769ed4146ad1d66b94b05d8c17ec168bc93f145b82bcfd40c40/earthkit_data-0.14.4.tar.gz", hash = "sha256:d3d5d7b920b57a4abdbfc3add56bf167bb2d1eec151b6f6d36abea766b06929a", size = 4851784, upload-time = "2025-06-06T17:00:24.357Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c4/a3/50f356541db8b74565359b604412b4392bc4e053fbc8a45e3f14327517de/earthkit_data-0.14.4-py3-none-any.whl", hash = "sha256:e56a0fe22f13648ac0becb2e075d8ee41da351c072afced5f16154a0da1d4083", size = 369012, upload-time = "2025-06-06T17:00:22.195Z" }, +] + +[[package]] +name = "earthkit-meteo" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "earthkit-utils", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/db/13/f57e9b361bb6760e34ebadea71ee51b00730d696466458d13ddcfec24236/earthkit_meteo-0.4.1.tar.gz", hash = "sha256:80867899d6149abbb3d0297df1de3feab78d6d0185aef45f3f2aa16361363e94", size = 368696, upload-time = "2025-06-05T12:00:18.952Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/67/df/8178bb8aa5897cd65369356402267b1609b1bdb592ba80c917c716aefe11/earthkit_meteo-0.4.1-py3-none-any.whl", hash = "sha256:f90b708a338bacf593a34c3da8e0526c9344a5480ce7627a10f7a1df67bd320f", size = 56920, upload-time = "2025-06-05T12:00:17.649Z" }, +] + +[[package]] +name = "earthkit-regrid" +version = "0.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "filelock", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "multiurl", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "scipy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2b/4b/2472567db7e8d4b6e2b4d2f0f6da8c3846f1b7c0870138dfaae1d8cd8217/earthkit_regrid-0.4.0.tar.gz", hash = "sha256:779cfb89b2d7c2ae75dbb4572f72a5ec2052f4c649703924e4ef4be6aadf7dda", size = 582851, upload-time = "2025-01-20T13:31:31.805Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/0a/68/116626e164472e928bd849e13e89c1ed186cb1471e09f60f6d933d81ddcc/earthkit_regrid-0.4.0-py3-none-any.whl", hash = "sha256:1335b5da4869c732fda5dcf3c7b8c489385b7c32f7698b7e6bf5764a4e8ec710", size = 36892, upload-time = "2025-01-20T13:31:29.538Z" }, +] + +[[package]] +name = "earthkit-utils" +version = "0.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "array-api-compat", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/51/d7/91ce33376f48a0dc5993008eebbb12415853fb87361ac849a834db505a35/earthkit_utils-0.0.1.tar.gz", hash = "sha256:8bb41d9b6c8cfc1e0d330cf9801183301e7febd03b6c87082ce3d52d129939e3", size = 19791, upload-time = "2025-04-04T16:00:49.79Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6e/9b/568d8e53ea6804084284015d79a757c11fa41f907b56c523c9e11a7ee679/earthkit_utils-0.0.1-py3-none-any.whl", hash = "sha256:1732ac37d9c4c97f56b733526052c047c49854438a7fc35ed775e2c204a7d825", size = 15431, upload-time = "2025-04-04T16:00:48.607Z" }, +] + +[[package]] +name = "eccodes" +version = "2.41.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "cffi", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "findlibs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/8a/c2/e2f8d98dea0b2d8d77c99edb72b5831286abfaf80d94edf13ad127f6979a/eccodes-2.41.0.tar.gz", hash = "sha256:f3e209f5da5a7fcee4942295db4ee7888e077bd2e0342e6170ec5fedb9b29840", size = 2268345, upload-time = "2025-04-10T10:18:00.637Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/35/d5/7803aa1bbff4161b147c11cd6531d421a2ad38a0bb2fd29a7265fb369c3d/eccodes-2.41.0-cp312-cp312-macosx_13_0_arm64.whl", hash = "sha256:56803ac38e5b50377054cb2b944c8982d6fcfca0c25b4d04fe1ef94ba81b202c", size = 6557422, upload-time = "2025-04-10T10:14:50.202Z" }, + { url = "https://files.pythonhosted.org/packages/d1/17/bf8f714f5dd483d0da11515dbcb1b4f0992e900abef540c318a93b55edb9/eccodes-2.41.0-cp312-cp312-macosx_13_0_x86_64.whl", hash = "sha256:ae3a0f62a4b1107fe9f8362d58e4f452891ccbcc6aaecca5437724223f71a974", size = 6659630, upload-time = "2025-04-10T10:17:05.746Z" }, + { url = "https://files.pythonhosted.org/packages/45/4a/7a45f8fc7d8f2047b023befd17155fa7d2d1274feda9796b1e69b68b7033/eccodes-2.41.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:bb0182f7ce3c907671860ab554430be053f37f227789a7a27b2d06118fe48c97", size = 7431350, upload-time = "2025-04-10T10:13:40.244Z" }, + { url = "https://files.pythonhosted.org/packages/31/f7/f48a3ab347941243064060e3b0139aeb8d9414f1775f54239bed7fe66a29/eccodes-2.41.0-cp312-cp312-win_amd64.whl", hash = "sha256:81ca80d251be3fa66c42c020538cd67b12ed6e7c79e1e10299dc36dc07d28678", size = 6239631, upload-time = "2025-04-10T10:15:36.13Z" }, + { url = "https://files.pythonhosted.org/packages/bd/42/ac29e37149f36807e8f979707f5ae0d466d4a2c4b340597e2177809a016b/eccodes-2.41.0-py3-none-any.whl", hash = "sha256:f3f4444757aac6a249cc47947dee5660309d48854ebfc5e6ca8515374398e1bf", size = 44012, upload-time = "2025-04-10T10:17:59.189Z" }, +] + +[[package]] +name = "einops" +version = "0.8.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e5/81/df4fbe24dff8ba3934af99044188e20a98ed441ad17a274539b74e82e126/einops-0.8.1.tar.gz", hash = "sha256:de5d960a7a761225532e0f1959e5315ebeafc0cd43394732f103ca44b9837e84", size = 54805, upload-time = "2025-02-09T03:17:00.434Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/62/9773de14fe6c45c23649e98b83231fffd7b9892b6cf863251dc2afa73643/einops-0.8.1-py3-none-any.whl", hash = "sha256:919387eb55330f5757c6bea9165c5ff5cfe63a642682ea788a6d472576d81737", size = 64359, upload-time = "2025-02-09T03:17:01.998Z" }, +] + +[[package]] +name = "entrypoints" +version = "0.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ea/8d/a7121ffe5f402dc015277d2d31eb82d2187334503a011c18f2e78ecbb9b2/entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4", size = 13974, upload-time = "2022-02-02T21:30:28.172Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/a8/365059bbcd4572cbc41de17fd5b682be5868b218c3c5479071865cab9078/entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f", size = 5294, upload-time = "2022-02-02T21:30:26.024Z" }, +] + +[[package]] +name = "executing" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/50/a9d80c47ff289c611ff12e63f7c5d13942c65d68125160cefd768c73e6e4/executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755", size = 978693, upload-time = "2025-01-22T15:41:29.403Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7b/8f/c4d9bafc34ad7ad5d8dc16dd1347ee0e507a52c3adb6bfa8887e1c6a26ba/executing-2.2.0-py2.py3-none-any.whl", hash = 
"sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa", size = 26702, upload-time = "2025-01-22T15:41:25.929Z" }, +] + +[[package]] +name = "fancycompleter" +version = "0.11.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyrepl", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4e/4c/d11187dee93eff89d082afda79b63c79320ae1347e49485a38f05ad359d0/fancycompleter-0.11.1.tar.gz", hash = "sha256:5b4ad65d76b32b1259251516d0f1cb2d82832b1ff8506697a707284780757f69", size = 341776, upload-time = "2025-05-26T12:59:11.045Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/30/c3/6f0e3896f193528bbd2b4d2122d4be8108a37efab0b8475855556a8c4afa/fancycompleter-0.11.1-py3-none-any.whl", hash = "sha256:44243d7fab37087208ca5acacf8f74c0aa4d733d04d593857873af7513cdf8a6", size = 11207, upload-time = "2025-05-26T12:59:09.857Z" }, +] + +[[package]] +name = "fastapi" +version = "0.119.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "starlette", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a6/f4/152127681182e6413e7a89684c434e19e7414ed7ac0c632999c3c6980640/fastapi-0.119.1.tar.gz", hash = "sha256:a5e3426edce3fe221af4e1992c6d79011b247e3b03cc57999d697fe76cbf8ae0", size = 338616, upload-time = "2025-10-20T11:30:27.734Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/26/e6d959b4ac959fdb3e9c4154656fc160794db6af8e64673d52759456bf07/fastapi-0.119.1-py3-none-any.whl", hash = "sha256:0b8c2a2cce853216e150e9bd4faaed88227f8eb37de21cb200771f491586a27f", size = 108123, upload-time = "2025-10-20T11:30:26.185Z" }, +] + +[[package]] +name = "fasteners" +version = "0.19" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5f/d4/e834d929be54bfadb1f3e3b931c38e956aaa3b235a46a3c764c26c774902/fasteners-0.19.tar.gz", hash = 
"sha256:b4f37c3ac52d8a445af3a66bce57b33b5e90b97c696b7b984f530cf8f0ded09c", size = 24832, upload-time = "2023-09-19T17:11:20.228Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/bf/fd60001b3abc5222d8eaa4a204cd8c0ae78e75adc688f33ce4bf25b7fafa/fasteners-0.19-py3-none-any.whl", hash = "sha256:758819cb5d94cdedf4e836988b74de396ceacb8e2794d21f82d131fd9ee77237", size = 18679, upload-time = "2023-09-19T17:11:18.725Z" }, +] + +[[package]] +name = "fastjsonschema" +version = "2.21.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8b/50/4b769ce1ac4071a1ef6d86b1a3fb56cdc3a37615e8c5519e1af96cdac366/fastjsonschema-2.21.1.tar.gz", hash = "sha256:794d4f0a58f848961ba16af7b9c85a3e88cd360df008c59aac6fc5ae9323b5d4", size = 373939, upload-time = "2024-12-02T10:55:15.133Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/90/2b/0817a2b257fe88725c25589d89aec060581aabf668707a8d03b2e9e0cb2a/fastjsonschema-2.21.1-py3-none-any.whl", hash = "sha256:c9e5b7e908310918cf494a434eeb31384dd84a98b57a30bcb1f535015b554667", size = 23924, upload-time = "2024-12-02T10:55:07.599Z" }, +] + +[[package]] +name = "filelock" +version = "3.18.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/10/c23352565a6544bdc5353e0b15fc1c563352101f30e24bf500207a54df9a/filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2", size = 18075, upload-time = "2025-03-14T07:11:40.47Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de", size = 16215, upload-time = "2025-03-14T07:11:39.145Z" }, +] + +[[package]] +name = "findlibs" +version = "0.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/29/66/4ed4652c0f91dceb13403192793caaf1d58c28296b1f5af5efaea63e2616/findlibs-0.1.1.tar.gz", hash = "sha256:dc7a0d4fe2bb6635f1e4bdcde60aecec42b6755c4783c2f629069c2f9321d732", size = 11284, upload-time = "2025-04-10T14:10:33.657Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/f8/945427d587d3d3ec6dea3297f5a5b6b1cd4c5e126d0638cb148d3c360487/findlibs-0.1.1-py3-none-any.whl", hash = "sha256:1b611a040d41e491049568e530c7e032f5c2be8f582386961b8d14a6936406a9", size = 10713, upload-time = "2025-04-10T14:10:32.825Z" }, +] + +[[package]] +name = "flash-attn" +version = "2.7.3" +source = { url = "https://object-store.os-api.cci1.ecmwf.int/weathergenerator-dev/wheels/flash_attn-2.7.3-cp312-cp312-linux_aarch64.whl" } +resolution-markers = [ + "platform_machine == 'aarch64' and sys_platform == 'linux'", +] +dependencies = [ + { name = "einops", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "torch", version = "2.6.0+cu126", source = { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-linux_aarch64.whl" }, marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://object-store.os-api.cci1.ecmwf.int/weathergenerator-dev/wheels/flash_attn-2.7.3-cp312-cp312-linux_aarch64.whl", hash = "sha256:7b52f161201b3fe98df274c75f5ced3edb90d4d2428e34a4dd95d0541f24b71a" }, +] + +[package.metadata] +requires-dist = [ + { name = "einops" }, + { name = "torch" }, +] + +[[package]] +name = "flash-attn" +version = 
"2.7.4.post1" +source = { url = "https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiTRUE-cp312-cp312-linux_x86_64.whl" } +resolution-markers = [ + "platform_machine == 'x86_64' and sys_platform == 'linux'", +] +dependencies = [ + { name = "einops", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "torch", version = "2.6.0+cu126", source = { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-manylinux_2_28_x86_64.whl" }, marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiTRUE-cp312-cp312-linux_x86_64.whl", hash = "sha256:8cb78ca2176735d7d7d867a8b355d728341be1928b912ce0b9df10591c9b9f7c" }, +] + +[package.metadata] +requires-dist = [ + { name = "einops" }, + { name = "torch" }, +] + +[[package]] +name = "flexcache" +version = "0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/55/b0/8a21e330561c65653d010ef112bf38f60890051d244ede197ddaa08e50c1/flexcache-0.3.tar.gz", hash = "sha256:18743bd5a0621bfe2cf8d519e4c3bfdf57a269c15d1ced3fb4b64e0ff4600656", size = 15816, upload-time = "2024-03-09T03:21:07.555Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/27/cd/c883e1a7c447479d6e13985565080e3fea88ab5a107c21684c813dba1875/flexcache-0.3-py3-none-any.whl", hash = "sha256:d43c9fea82336af6e0115e308d9d33a185390b8346a017564611f1466dcd2e32", size = 13263, upload-time = "2024-03-09T03:21:05.635Z" }, +] + +[[package]] +name = "flexparser" +version = "0.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/82/99/b4de7e39e8eaf8207ba1a8fa2241dd98b2ba72ae6e16960d8351736d8702/flexparser-0.4.tar.gz", hash = "sha256:266d98905595be2ccc5da964fe0a2c3526fbbffdc45b65b3146d75db992ef6b2", size = 31799, upload-time = "2024-11-07T02:00:56.249Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/5e/3be305568fe5f34448807976dc82fc151d76c3e0e03958f34770286278c1/flexparser-0.4-py3-none-any.whl", hash = "sha256:3738b456192dcb3e15620f324c447721023c0293f6af9955b481e91d00179846", size = 27625, upload-time = "2024-11-07T02:00:54.523Z" }, +] + +[[package]] +name = "fonttools" +version = "4.58.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2e/5a/1124b2c8cb3a8015faf552e92714040bcdbc145dfa29928891b02d147a18/fonttools-4.58.4.tar.gz", hash = 
"sha256:928a8009b9884ed3aae17724b960987575155ca23c6f0b8146e400cc9e0d44ba", size = 3525026, upload-time = "2025-06-13T17:25:15.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/3c/1d1792bfe91ef46f22a3d23b4deb514c325e73c17d4f196b385b5e2faf1c/fonttools-4.58.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:462211c0f37a278494e74267a994f6be9a2023d0557aaa9ecbcbfce0f403b5a6", size = 2754082, upload-time = "2025-06-13T17:24:24.862Z" }, + { url = "https://files.pythonhosted.org/packages/2a/1f/2b261689c901a1c3bc57a6690b0b9fc21a9a93a8b0c83aae911d3149f34e/fonttools-4.58.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0c7a12fb6f769165547f00fcaa8d0df9517603ae7e04b625e5acb8639809b82d", size = 2321677, upload-time = "2025-06-13T17:24:26.815Z" }, + { url = "https://files.pythonhosted.org/packages/fe/6b/4607add1755a1e6581ae1fc0c9a640648e0d9cdd6591cc2d581c2e07b8c3/fonttools-4.58.4-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:2d42c63020a922154add0a326388a60a55504629edc3274bc273cd3806b4659f", size = 4896354, upload-time = "2025-06-13T17:24:28.428Z" }, + { url = "https://files.pythonhosted.org/packages/cd/95/34b4f483643d0cb11a1f830b72c03fdd18dbd3792d77a2eb2e130a96fada/fonttools-4.58.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8f2b4e6fd45edc6805f5f2c355590b092ffc7e10a945bd6a569fc66c1d2ae7aa", size = 4941633, upload-time = "2025-06-13T17:24:30.568Z" }, + { url = "https://files.pythonhosted.org/packages/81/ac/9bafbdb7694059c960de523e643fa5a61dd2f698f3f72c0ca18ae99257c7/fonttools-4.58.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f155b927f6efb1213a79334e4cb9904d1e18973376ffc17a0d7cd43d31981f1e", size = 4886170, upload-time = "2025-06-13T17:24:32.724Z" }, + { url = "https://files.pythonhosted.org/packages/ae/44/a3a3b70d5709405f7525bb7cb497b4e46151e0c02e3c8a0e40e5e9fe030b/fonttools-4.58.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e38f687d5de97c7fb7da3e58169fb5ba349e464e141f83c3c2e2beb91d317816", size = 5037851, upload-time = "2025-06-13T17:24:35.034Z" }, + { url = "https://files.pythonhosted.org/packages/21/cb/e8923d197c78969454eb876a4a55a07b59c9c4c46598f02b02411dc3b45c/fonttools-4.58.4-cp312-cp312-win32.whl", hash = "sha256:636c073b4da9db053aa683db99580cac0f7c213a953b678f69acbca3443c12cc", size = 2187428, upload-time = "2025-06-13T17:24:36.996Z" }, + { url = "https://files.pythonhosted.org/packages/46/e6/fe50183b1a0e1018e7487ee740fa8bb127b9f5075a41e20d017201e8ab14/fonttools-4.58.4-cp312-cp312-win_amd64.whl", hash = "sha256:82e8470535743409b30913ba2822e20077acf9ea70acec40b10fcf5671dceb58", size = 2236649, upload-time = "2025-06-13T17:24:38.985Z" }, + { url = "https://files.pythonhosted.org/packages/0b/2f/c536b5b9bb3c071e91d536a4d11f969e911dbb6b227939f4c5b0bca090df/fonttools-4.58.4-py3-none-any.whl", hash = "sha256:a10ce13a13f26cbb9f37512a4346bb437ad7e002ff6fa966a7ce7ff5ac3528bd", size = 1114660, upload-time = "2025-06-13T17:25:13.321Z" }, +] + +[[package]] +name = "fsspec" +version = "2025.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/f7/27f15d41f0ed38e8fcc488584b57e902b331da7f7c6dcda53721b15838fc/fsspec-2025.5.1.tar.gz", hash = "sha256:2e55e47a540b91843b755e83ded97c6e897fa0942b11490113f09e9c443c2475", size = 303033, upload-time = "2025-05-24T12:03:23.792Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/bb/61/78c7b3851add1481b048b5fdc29067397a1784e2910592bc81bb3f608635/fsspec-2025.5.1-py3-none-any.whl", hash = "sha256:24d3a2e663d5fc735ab256263c4075f374a174c3410c0b25e5bd1970bceaa462", size = 199052, upload-time = "2025-05-24T12:03:21.66Z" }, +] + +[[package]] +name = "gitdb" +version = "4.0.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "smmap", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/94/63b0fc47eb32792c7ba1fe1b694daec9a63620db1e313033d18140c2320a/gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571", size = 394684, upload-time = "2025-01-02T07:20:46.413Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf", size = 62794, upload-time = "2025-01-02T07:20:43.624Z" }, +] + +[[package]] +name = "gitpython" +version = "3.1.44" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "gitdb", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/89/37df0b71473153574a5cdef8f242de422a0f5d26d7a9e231e6f169b4ad14/gitpython-3.1.44.tar.gz", hash = "sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269", size = 214196, upload-time = "2025-01-02T07:32:43.59Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/9a/4114a9057db2f1462d5c8f8390ab7383925fe1ac012eaa42402ad65c2963/GitPython-3.1.44-py3-none-any.whl", hash = "sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110", size = 207599, upload-time = "2025-01-02T07:32:40.731Z" }, +] + +[[package]] +name = "google-auth" +version = "2.41.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyasn1-modules", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "rsa", marker = "(platform_machine != 
'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a8/af/5129ce5b2f9688d2fa49b463e544972a7c82b0fdb50980dafee92e121d9f/google_auth-2.41.1.tar.gz", hash = "sha256:b76b7b1f9e61f0cb7e88870d14f6a94aeef248959ef6992670efee37709cbfd2", size = 292284, upload-time = "2025-09-30T22:51:26.363Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/a4/7319a2a8add4cc352be9e3efeff5e2aacee917c85ca2fa1647e29089983c/google_auth-2.41.1-py2.py3-none-any.whl", hash = "sha256:754843be95575b9a19c604a848a41be03f7f2afd8c019f716dc1f51ee41c639d", size = 221302, upload-time = "2025-09-30T22:51:24.212Z" }, +] + +[[package]] +name = "grpcio" +version = "1.74.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/b4/35feb8f7cab7239c5b94bd2db71abb3d6adb5f335ad8f131abb6060840b6/grpcio-1.74.0.tar.gz", hash = "sha256:80d1f4fbb35b0742d3e3d3bb654b7381cd5f015f8497279a1e9c21ba623e01b1", size = 12756048, upload-time = "2025-07-24T18:54:23.039Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/4c/5d/e504d5d5c4469823504f65687d6c8fb97b7f7bf0b34873b7598f1df24630/grpcio-1.74.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:8533e6e9c5bd630ca98062e3a1326249e6ada07d05acf191a77bc33f8948f3d8", size = 5445551, upload-time = "2025-07-24T18:53:23.641Z" }, + { url = "https://files.pythonhosted.org/packages/43/01/730e37056f96f2f6ce9f17999af1556df62ee8dab7fa48bceeaab5fd3008/grpcio-1.74.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:2918948864fec2a11721d91568effffbe0a02b23ecd57f281391d986847982f6", size = 10979810, upload-time = "2025-07-24T18:53:25.349Z" }, + { url = "https://files.pythonhosted.org/packages/79/3d/09fd100473ea5c47083889ca47ffd356576173ec134312f6aa0e13111dee/grpcio-1.74.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:60d2d48b0580e70d2e1954d0d19fa3c2e60dd7cbed826aca104fff518310d1c5", size = 5941946, upload-time = "2025-07-24T18:53:27.387Z" }, + { url = "https://files.pythonhosted.org/packages/8a/99/12d2cca0a63c874c6d3d195629dcd85cdf5d6f98a30d8db44271f8a97b93/grpcio-1.74.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3601274bc0523f6dc07666c0e01682c94472402ac2fd1226fd96e079863bfa49", size = 6621763, upload-time = "2025-07-24T18:53:29.193Z" }, + { url = "https://files.pythonhosted.org/packages/9d/2c/930b0e7a2f1029bbc193443c7bc4dc2a46fedb0203c8793dcd97081f1520/grpcio-1.74.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:176d60a5168d7948539def20b2a3adcce67d72454d9ae05969a2e73f3a0feee7", size = 6180664, upload-time = "2025-07-24T18:53:30.823Z" }, + { url = "https://files.pythonhosted.org/packages/db/d5/ff8a2442180ad0867717e670f5ec42bfd8d38b92158ad6bcd864e6d4b1ed/grpcio-1.74.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:e759f9e8bc908aaae0412642afe5416c9f983a80499448fcc7fab8692ae044c3", size = 6301083, upload-time = "2025-07-24T18:53:32.454Z" }, + { url = "https://files.pythonhosted.org/packages/b0/ba/b361d390451a37ca118e4ec7dccec690422e05bc85fba2ec72b06cefec9f/grpcio-1.74.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9e7c4389771855a92934b2846bd807fc25a3dfa820fd912fe6bd8136026b2707", size = 
6994132, upload-time = "2025-07-24T18:53:34.506Z" }, + { url = "https://files.pythonhosted.org/packages/3b/0c/3a5fa47d2437a44ced74141795ac0251bbddeae74bf81df3447edd767d27/grpcio-1.74.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:cce634b10aeab37010449124814b05a62fb5f18928ca878f1bf4750d1f0c815b", size = 6489616, upload-time = "2025-07-24T18:53:36.217Z" }, + { url = "https://files.pythonhosted.org/packages/ae/95/ab64703b436d99dc5217228babc76047d60e9ad14df129e307b5fec81fd0/grpcio-1.74.0-cp312-cp312-win32.whl", hash = "sha256:885912559974df35d92219e2dc98f51a16a48395f37b92865ad45186f294096c", size = 3807083, upload-time = "2025-07-24T18:53:37.911Z" }, + { url = "https://files.pythonhosted.org/packages/84/59/900aa2445891fc47a33f7d2f76e00ca5d6ae6584b20d19af9c06fa09bf9a/grpcio-1.74.0-cp312-cp312-win_amd64.whl", hash = "sha256:42f8fee287427b94be63d916c90399ed310ed10aadbf9e2e5538b3e497d269bc", size = 4490123, upload-time = "2025-07-24T18:53:39.528Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "hatchling" +version = "1.27.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pathspec", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pluggy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "trove-classifiers", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/8f/8a/cc1debe3514da292094f1c3a700e4ca25442489731ef7c0814358816bb03/hatchling-1.27.0.tar.gz", hash = "sha256:971c296d9819abb3811112fc52c7a9751c8d381898f36533bb16f9791e941fd6", size = 54983, upload-time = "2024-12-15T17:08:11.894Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/08/e7/ae38d7a6dfba0533684e0b2136817d667588ae3ec984c1a4e5df5eb88482/hatchling-1.27.0-py3-none-any.whl", hash = "sha256:d3a2f3567c4f926ea39849cdf924c7e99e6686c9c8e288ae1037c8fa2a5d937b", size = 75794, upload-time = "2024-12-15T17:08:10.364Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/97/ebf4da567aa6827c909642694d71c9fcf53e5b504f2d96afea02718862f3/iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7", size = 4793, upload-time = "2025-03-19T20:09:59.721Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2c/e1/e6716421ea10d38022b952c159d5161ca1193197fb744506875fbb87ea7b/iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760", size = 6050, upload-time = "2025-03-19T20:10:01.071Z" }, +] + +[[package]] +name = "ipykernel" +version = "6.30.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "comm", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or 
(sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "debugpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "ipython", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "jupyter-client", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "jupyter-core", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "matplotlib-inline", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "nest-asyncio", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "psutil", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyzmq", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 
'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tornado", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "traitlets", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/27/9e6e30ed92f2ac53d29f70b09da8b2dc456e256148e289678fa0e825f46a/ipykernel-6.30.0.tar.gz", hash = "sha256:b7b808ddb2d261aae2df3a26ff3ff810046e6de3dfbc6f7de8c98ea0a6cb632c", size = 165125, upload-time = "2025-07-21T10:36:09.259Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/3d/00813c3d9b46e3dcd88bd4530e0a3c63c0509e5d8c9eff34723ea243ab04/ipykernel-6.30.0-py3-none-any.whl", hash = "sha256:fd2936e55c4a1c2ee8b1e5fa6a372b8eecc0ab1338750dee76f48fa5cca1301e", size = 117264, upload-time = "2025-07-21T10:36:06.854Z" }, +] + +[[package]] +name = "ipython" +version = "9.4.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "decorator", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "ipython-pygments-lexers", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "jedi", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "matplotlib-inline", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pexpect", marker = 
"(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "prompt-toolkit", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pygments", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "stack-data", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "traitlets", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/54/80/406f9e3bde1c1fd9bf5a0be9d090f8ae623e401b7670d8f6fdf2ab679891/ipython-9.4.0.tar.gz", hash = "sha256:c033c6d4e7914c3d9768aabe76bbe87ba1dc66a92a05db6bfa1125d81f2ee270", size = 4385338, upload-time = "2025-07-01T11:11:30.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/63/f8/0031ee2b906a15a33d6bfc12dd09c3dfa966b3cb5b284ecfb7549e6ac3c4/ipython-9.4.0-py3-none-any.whl", hash = "sha256:25850f025a446d9b359e8d296ba175a36aedd32e83ca9b5060430fe16801f066", size = 611021, upload-time = "2025-07-01T11:11:27.85Z" }, +] + +[[package]] +name = "ipython-pygments-lexers" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pygments", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ef/4c/5dd1d8af08107f88c7f741ead7a40854b8ac24ddf9ae850afbcf698aa552/ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81", size = 8393, upload-time = "2025-01-17T11:24:34.505Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/d9/33/1f075bf72b0b747cb3288d011319aaf64083cf2efef8354174e3ed4540e2/ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c", size = 8074, upload-time = "2025-01-17T11:24:33.271Z" }, +] + +[[package]] +name = "jedi" +version = "0.19.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "parso", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/3a/79a912fbd4d8dd6fbb02bf69afd3bb72cf0c729bb3063c6f4498603db17a/jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0", size = 1231287, upload-time = "2024-11-11T01:41:42.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c0/5a/9cac0c82afec3d09ccd97c8b6502d48f165f9124db81b4bcb90b4af974ee/jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9", size = 1572278, upload-time = "2024-11-11T01:41:40.175Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.24.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "jsonschema-specifications", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "referencing", marker = 
"(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "rpds-py", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/d3/1cf5326b923a53515d8f3a2cd442e6d7e94fcc444716e879ea70a0ce3177/jsonschema-4.24.0.tar.gz", hash = "sha256:0b4e8069eb12aedfa881333004bccaec24ecef5a8a6a4b6df142b2cc9599d196", size = 353480, upload-time = "2025-05-26T18:48:10.459Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/3d/023389198f69c722d039351050738d6755376c8fd343e91dc493ea485905/jsonschema-4.24.0-py3-none-any.whl", hash = "sha256:a462455f19f5faf404a7902952b6f0e3ce868f3ee09a359b05eca6673bd8412d", size = 88709, upload-time = "2025-05-26T18:48:08.417Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/ce/46fbd9c8119cfc3581ee5643ea49464d168028cfb5caff5fc0596d0cf914/jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608", size = 15513, upload-time = "2025-04-23T12:34:07.418Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/0e/b27cdbaccf30b890c40ed1da9fd4a3593a5cf94dae54fb34f8a4b74fcd3f/jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af", size = 18437, upload-time = "2025-04-23T12:34:05.422Z" }, +] + +[[package]] +name = "jupyter-client" +version = "8.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jupyter-core", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "python-dateutil", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, 
+ { name = "pyzmq", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tornado", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "traitlets", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/22/bf9f12fdaeae18019a468b68952a60fe6dbab5d67cd2a103cac7659b41ca/jupyter_client-8.6.3.tar.gz", hash = "sha256:35b3a0947c4a6e9d589eb97d7d4cd5e90f910ee73101611f01283732bd6d9419", size = 342019, upload-time = "2024-09-17T10:44:17.613Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/85/b0394e0b6fcccd2c1eeefc230978a6f8cb0c5df1e4cd3e7625735a0d7d1e/jupyter_client-8.6.3-py3-none-any.whl", hash = "sha256:e8a19cc986cc45905ac3362915f410f3af85424b4c0905e94fa5f2cb08e8f23f", size = 106105, upload-time = "2024-09-17T10:44:15.218Z" }, +] + +[[package]] +name = "jupyter-core" +version = "5.8.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "platformdirs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "traitlets", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/1b/72906d554acfeb588332eaaa6f61577705e9ec752ddb486f302dafa292d9/jupyter_core-5.8.1.tar.gz", hash = "sha256:0a5f9706f70e64786b75acba995988915ebd4601c8a52e534a40b51c95f59941", size = 88923, upload-time = "2025-05-27T07:38:16.655Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/57/6bffd4b20b88da3800c5d691e0337761576ee688eb01299eae865689d2df/jupyter_core-5.8.1-py3-none-any.whl", hash = "sha256:c28d268fc90fb53f1338ded2eb410704c5449a358406e8a948b75706e24863d0", size = 28880, upload-time = "2025-05-27T07:38:15.137Z" }, +] + +[[package]] +name = "jupytext" +version = "1.17.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"markdown-it-py", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "mdit-py-plugins", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "nbformat", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/30/ce/0bd5290ca4978777154e2683413dca761781aacf57f7dc0146f5210df8b1/jupytext-1.17.2.tar.gz", hash = "sha256:772d92898ac1f2ded69106f897b34af48ce4a85c985fa043a378ff5a65455f02", size = 3748577, upload-time = "2025-06-01T21:31:48.231Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/f1/82ea8e783433707cafd9790099a2d19f113c22f32a31c8bb5abdc7a61dbb/jupytext-1.17.2-py3-none-any.whl", hash = "sha256:4f85dc43bb6a24b75491c5c434001ad5ef563932f68f15dd3e1c8ce12a4a426b", size = 164401, upload-time = "2025-06-01T21:31:46.319Z" }, +] + +[[package]] +name = "kiwisolver" +version = "1.4.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/59/7c91426a8ac292e1cdd53a63b6d9439abd573c875c3f92c146767dd33faf/kiwisolver-1.4.8.tar.gz", hash = "sha256:23d5f023bdc8c7e54eb65f03ca5d5bb25b601eac4d7f1a042888a1f45237987e", size = 97538, upload-time = "2024-12-24T18:30:51.519Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/aa/cea685c4ab647f349c3bc92d2daf7ae34c8e8cf405a6dcd3a497f58a2ac3/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d6af5e8815fd02997cb6ad9bbed0ee1e60014438ee1a5c2444c96f87b8843502", size = 124152, upload-time = "2024-12-24T18:29:16.85Z" }, + { url = 
"https://files.pythonhosted.org/packages/c5/0b/8db6d2e2452d60d5ebc4ce4b204feeb16176a851fd42462f66ade6808084/kiwisolver-1.4.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:bade438f86e21d91e0cf5dd7c0ed00cda0f77c8c1616bd83f9fc157fa6760d31", size = 66555, upload-time = "2024-12-24T18:29:19.146Z" }, + { url = "https://files.pythonhosted.org/packages/60/26/d6a0db6785dd35d3ba5bf2b2df0aedc5af089962c6eb2cbf67a15b81369e/kiwisolver-1.4.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b83dc6769ddbc57613280118fb4ce3cd08899cc3369f7d0e0fab518a7cf37fdb", size = 65067, upload-time = "2024-12-24T18:29:20.096Z" }, + { url = "https://files.pythonhosted.org/packages/c9/ed/1d97f7e3561e09757a196231edccc1bcf59d55ddccefa2afc9c615abd8e0/kiwisolver-1.4.8-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111793b232842991be367ed828076b03d96202c19221b5ebab421ce8bcad016f", size = 1378443, upload-time = "2024-12-24T18:29:22.843Z" }, + { url = "https://files.pythonhosted.org/packages/29/61/39d30b99954e6b46f760e6289c12fede2ab96a254c443639052d1b573fbc/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:257af1622860e51b1a9d0ce387bf5c2c4f36a90594cb9514f55b074bcc787cfc", size = 1472728, upload-time = "2024-12-24T18:29:24.463Z" }, + { url = "https://files.pythonhosted.org/packages/0c/3e/804163b932f7603ef256e4a715e5843a9600802bb23a68b4e08c8c0ff61d/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:69b5637c3f316cab1ec1c9a12b8c5f4750a4c4b71af9157645bf32830e39c03a", size = 1478388, upload-time = "2024-12-24T18:29:25.776Z" }, + { url = "https://files.pythonhosted.org/packages/8a/9e/60eaa75169a154700be74f875a4d9961b11ba048bef315fbe89cb6999056/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:782bb86f245ec18009890e7cb8d13a5ef54dcf2ebe18ed65f795e635a96a1c6a", size = 1413849, upload-time = "2024-12-24T18:29:27.202Z" }, + { url = "https://files.pythonhosted.org/packages/bc/b3/9458adb9472e61a998c8c4d95cfdfec91c73c53a375b30b1428310f923e4/kiwisolver-1.4.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc978a80a0db3a66d25767b03688f1147a69e6237175c0f4ffffaaedf744055a", size = 1475533, upload-time = "2024-12-24T18:29:28.638Z" }, + { url = "https://files.pythonhosted.org/packages/e4/7a/0a42d9571e35798de80aef4bb43a9b672aa7f8e58643d7bd1950398ffb0a/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:36dbbfd34838500a31f52c9786990d00150860e46cd5041386f217101350f0d3", size = 2268898, upload-time = "2024-12-24T18:29:30.368Z" }, + { url = "https://files.pythonhosted.org/packages/d9/07/1255dc8d80271400126ed8db35a1795b1a2c098ac3a72645075d06fe5c5d/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:eaa973f1e05131de5ff3569bbba7f5fd07ea0595d3870ed4a526d486fe57fa1b", size = 2425605, upload-time = "2024-12-24T18:29:33.151Z" }, + { url = "https://files.pythonhosted.org/packages/84/df/5a3b4cf13780ef6f6942df67b138b03b7e79e9f1f08f57c49957d5867f6e/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a66f60f8d0c87ab7f59b6fb80e642ebb29fec354a4dfad687ca4092ae69d04f4", size = 2375801, upload-time = "2024-12-24T18:29:34.584Z" }, + { url = "https://files.pythonhosted.org/packages/8f/10/2348d068e8b0f635c8c86892788dac7a6b5c0cb12356620ab575775aad89/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858416b7fb777a53f0c59ca08190ce24e9abbd3cffa18886a5781b8e3e26f65d", size = 
2520077, upload-time = "2024-12-24T18:29:36.138Z" }, + { url = "https://files.pythonhosted.org/packages/32/d8/014b89fee5d4dce157d814303b0fce4d31385a2af4c41fed194b173b81ac/kiwisolver-1.4.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:085940635c62697391baafaaeabdf3dd7a6c3643577dde337f4d66eba021b2b8", size = 2338410, upload-time = "2024-12-24T18:29:39.991Z" }, + { url = "https://files.pythonhosted.org/packages/bd/72/dfff0cc97f2a0776e1c9eb5bef1ddfd45f46246c6533b0191887a427bca5/kiwisolver-1.4.8-cp312-cp312-win_amd64.whl", hash = "sha256:01c3d31902c7db5fb6182832713d3b4122ad9317c2c5877d0539227d96bb2e50", size = 71853, upload-time = "2024-12-24T18:29:42.006Z" }, + { url = "https://files.pythonhosted.org/packages/dc/85/220d13d914485c0948a00f0b9eb419efaf6da81b7d72e88ce2391f7aed8d/kiwisolver-1.4.8-cp312-cp312-win_arm64.whl", hash = "sha256:a3c44cb68861de93f0c4a8175fbaa691f0aa22550c331fefef02b618a9dcb476", size = 65424, upload-time = "2024-12-24T18:29:44.38Z" }, +] + +[[package]] +name = "linkify-it-py" +version = "2.0.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "uc-micro-py", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2a/ae/bb56c6828e4797ba5a4821eec7c43b8bf40f69cda4d4f5f8c8a2810ec96a/linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048", size = 27946, upload-time = "2024-02-04T14:48:04.179Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820, upload-time = "2024-02-04T14:48:02.496Z" }, +] + +[[package]] +name = "locket" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/83/97b29fe05cb6ae28d2dbd30b81e2e402a3eed5f460c26e9eaa5895ceacf5/locket-1.0.0.tar.gz", hash = "sha256:5c0d4c052a8bbbf750e056a8e65ccd309086f4f0f18a2eac306a8dfa4112a632", size = 4350, upload-time = "2022-04-20T22:04:44.312Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/bc/83e112abc66cd466c6b83f99118035867cecd41802f8d044638aa78a106e/locket-1.0.0-py2.py3-none-any.whl", hash = "sha256:b6c819a722f7b6bd955b80781788e4a66a55628b858d347536b7e81325a3a5e3", size = 4398, upload-time = "2022-04-20T22:04:42.23Z" }, +] + +[[package]] +name = "lru-dict" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/96/e3/42c87871920602a3c8300915bd0292f76eccc66c38f782397acbf8a62088/lru-dict-1.3.0.tar.gz", hash = "sha256:54fd1966d6bd1fcde781596cb86068214edeebff1db13a2cea11079e3fd07b6b", size = 13123, upload-time = "2023-11-06T01:40:12.951Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/5c/385f080747eb3083af87d8e4c9068f3c4cab89035f6982134889940dafd8/lru_dict-1.3.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:c279068f68af3b46a5d649855e1fb87f5705fe1f744a529d82b2885c0e1fc69d", size = 17174, upload-time = "2023-11-06T01:39:07.923Z" }, + { url = 
"https://files.pythonhosted.org/packages/3c/de/5ef2ed75ce55d7059d1b96177ba04fa7ee1f35564f97bdfcd28fccfbe9d2/lru_dict-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:350e2233cfee9f326a0d7a08e309372d87186565e43a691b120006285a0ac549", size = 10742, upload-time = "2023-11-06T01:39:08.871Z" }, + { url = "https://files.pythonhosted.org/packages/ca/05/f69a6abb0062d2cf2ce0aaf0284b105b97d1da024ca6d3d0730e6151242e/lru_dict-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4eafb188a84483b3231259bf19030859f070321b00326dcb8e8c6cbf7db4b12f", size = 11079, upload-time = "2023-11-06T01:39:09.766Z" }, + { url = "https://files.pythonhosted.org/packages/ea/59/cf891143abe58a455b8eaa9175f0e80f624a146a2bf9a1ca842ee0ef930a/lru_dict-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:73593791047e36b37fdc0b67b76aeed439fcea80959c7d46201240f9ec3b2563", size = 32469, upload-time = "2023-11-06T01:39:11.091Z" }, + { url = "https://files.pythonhosted.org/packages/59/88/d5976e9f70107ce11e45d93c6f0c2d5eaa1fc30bb3c8f57525eda4510dff/lru_dict-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1958cb70b9542773d6241974646e5410e41ef32e5c9e437d44040d59bd80daf2", size = 33496, upload-time = "2023-11-06T01:39:12.463Z" }, + { url = "https://files.pythonhosted.org/packages/6c/f8/94d6e910d54fc1fa05c0ee1cd608c39401866a18cf5e5aff238449b33c11/lru_dict-1.3.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bc1cd3ed2cee78a47f11f3b70be053903bda197a873fd146e25c60c8e5a32cd6", size = 29914, upload-time = "2023-11-06T01:39:13.395Z" }, + { url = "https://files.pythonhosted.org/packages/ca/b9/9db79780c8a3cfd66bba6847773061e5cf8a3746950273b9985d47bbfe53/lru_dict-1.3.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82eb230d48eaebd6977a92ddaa6d788f14cf4f4bcf5bbffa4ddfd60d051aa9d4", size = 32241, upload-time = "2023-11-06T01:39:14.612Z" }, + { url = "https://files.pythonhosted.org/packages/9b/b6/08a623019daec22a40c4d6d2c40851dfa3d129a53b2f9469db8eb13666c1/lru_dict-1.3.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5ad659cbc349d0c9ba8e536b5f40f96a70c360f43323c29f4257f340d891531c", size = 37320, upload-time = "2023-11-06T01:39:15.875Z" }, + { url = "https://files.pythonhosted.org/packages/70/0b/d3717159c26155ff77679cee1b077d22e1008bf45f19921e193319cd8e46/lru_dict-1.3.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:ba490b8972531d153ac0d4e421f60d793d71a2f4adbe2f7740b3c55dce0a12f1", size = 35054, upload-time = "2023-11-06T01:39:17.063Z" }, + { url = "https://files.pythonhosted.org/packages/04/74/f2ae00de7c27984a19b88d2b09ac877031c525b01199d7841ec8fa657fd6/lru_dict-1.3.0-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:c0131351b8a7226c69f1eba5814cbc9d1d8daaf0fdec1ae3f30508e3de5262d4", size = 38613, upload-time = "2023-11-06T01:39:18.136Z" }, + { url = "https://files.pythonhosted.org/packages/5a/0b/e30236aafe31b4247aa9ae61ba8aac6dde75c3ea0e47a8fb7eef53f6d5ce/lru_dict-1.3.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:0e88dba16695f17f41701269fa046197a3fd7b34a8dba744c8749303ddaa18df", size = 37143, upload-time = "2023-11-06T01:39:19.571Z" }, + { url = "https://files.pythonhosted.org/packages/1c/28/b59bcebb8d76ba8147a784a8be7eab6a4ad3395b9236e73740ff675a5a52/lru_dict-1.3.0-cp312-cp312-win32.whl", hash = "sha256:6ffaf595e625b388babc8e7d79b40f26c7485f61f16efe76764e32dce9ea17fc", size = 12653, upload-time = 
"2023-11-06T01:39:20.574Z" }, + { url = "https://files.pythonhosted.org/packages/bd/18/06d9710cb0a0d3634f8501e4bdcc07abe64a32e404d82895a6a36fab97f6/lru_dict-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:cf9da32ef2582434842ab6ba6e67290debfae72771255a8e8ab16f3e006de0aa", size = 13811, upload-time = "2023-11-06T01:39:21.599Z" }, +] + +[[package]] +name = "markdown" +version = "3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/15/222b423b0b88689c266d9eac4e61396fe2cc53464459d6a37618ac863b24/markdown-3.8.tar.gz", hash = "sha256:7df81e63f0df5c4b24b7d156eb81e4690595239b7d70937d0409f1b0de319c6f", size = 360906, upload-time = "2025-04-11T14:42:50.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/3f/afe76f8e2246ffbc867440cbcf90525264df0e658f8a5ca1f872b3f6192a/markdown-3.8-py3-none-any.whl", hash = "sha256:794a929b79c5af141ef5ab0f2f642d0f7b1872981250230e72682346f7cc90dc", size = 106210, upload-time = "2025-04-11T14:42:49.178Z" }, +] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mdurl", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/71/3b932df36c1a044d397a1f92d1cf91ee0a503d91e470cbd670aa66b07ed0/markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb", size = 74596, upload-time = "2023-06-03T06:41:14.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/d7/1ec15b46af6af88f19b8e5ffea08fa375d433c998b8a7639e76935c14f1f/markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1", size = 87528, upload-time = "2023-06-03T06:41:11.019Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" }, + { 
url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" }, +] + +[[package]] +name = "matplotlib" +version = "3.10.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "cycler", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "fonttools", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 
'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "kiwisolver", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pillow", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyparsing", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "python-dateutil", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/91/d49359a21893183ed2a5b6c76bec40e0b1dcbf8ca148f864d134897cfc75/matplotlib-3.10.3.tar.gz", hash = "sha256:2f82d2c5bb7ae93aaaa4cd42aca65d76ce6376f83304fa3a630b569aca274df0", size = 34799811, upload-time = "2025-05-08T19:10:54.39Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/43/6b80eb47d1071f234ef0c96ca370c2ca621f91c12045f1401b5c9b28a639/matplotlib-3.10.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0ab1affc11d1f495ab9e6362b8174a25afc19c081ba5b0775ef00533a4236eea", size = 8179689, upload-time = "2025-05-08T19:10:07.602Z" }, + { url = "https://files.pythonhosted.org/packages/0f/70/d61a591958325c357204870b5e7b164f93f2a8cca1dc6ce940f563909a13/matplotlib-3.10.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2a818d8bdcafa7ed2eed74487fdb071c09c1ae24152d403952adad11fa3c65b4", size = 8050466, upload-time = "2025-05-08T19:10:09.383Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/75/70c9d2306203148cc7902a961240c5927dd8728afedf35e6a77e105a2985/matplotlib-3.10.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:748ebc3470c253e770b17d8b0557f0aa85cf8c63fd52f1a61af5b27ec0b7ffee", size = 8456252, upload-time = "2025-05-08T19:10:11.958Z" }, + { url = "https://files.pythonhosted.org/packages/c4/91/ba0ae1ff4b3f30972ad01cd4a8029e70a0ec3b8ea5be04764b128b66f763/matplotlib-3.10.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed70453fd99733293ace1aec568255bc51c6361cb0da94fa5ebf0649fdb2150a", size = 8601321, upload-time = "2025-05-08T19:10:14.47Z" }, + { url = "https://files.pythonhosted.org/packages/d2/88/d636041eb54a84b889e11872d91f7cbf036b3b0e194a70fa064eb8b04f7a/matplotlib-3.10.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dbed9917b44070e55640bd13419de83b4c918e52d97561544814ba463811cbc7", size = 9406972, upload-time = "2025-05-08T19:10:16.569Z" }, + { url = "https://files.pythonhosted.org/packages/b1/79/0d1c165eac44405a86478082e225fce87874f7198300bbebc55faaf6d28d/matplotlib-3.10.3-cp312-cp312-win_amd64.whl", hash = "sha256:cf37d8c6ef1a48829443e8ba5227b44236d7fcaf7647caa3178a4ff9f7a5be05", size = 8067954, upload-time = "2025-05-08T19:10:18.663Z" }, +] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "traitlets", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/99/5b/a36a337438a14116b16480db471ad061c36c3694df7c2084a0da7ba538b7/matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90", size = 8159, upload-time = "2024-04-15T13:44:44.803Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/8e/9ad090d3553c280a8060fbf6e24dc1c0c29704ee7d1c372f0c174aa59285/matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca", size = 9899, upload-time = "2024-04-15T13:44:43.265Z" }, +] + +[[package]] +name = "mdit-py-plugins" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markdown-it-py", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/03/a2ecab526543b152300717cf232bb4bb8605b6edb946c845016fa9c9c9fd/mdit_py_plugins-0.4.2.tar.gz", hash = "sha256:5f2cd1fdb606ddf152d37ec30e46101a60512bc0e5fa1a7002c36647b09e26b5", size = 43542, upload-time = "2024-09-09T20:27:49.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/f7/7782a043553ee469c1ff49cfa1cdace2d6bf99a1f333cf38676b3ddf30da/mdit_py_plugins-0.4.2-py3-none-any.whl", hash = "sha256:0c673c3f889399a33b95e88d2f0d111b4447bdfea7f237dab2d488f459835636", 
size = 55316, upload-time = "2024-09-09T20:27:48.397Z" }, +] + +[[package]] +name = "mdurl" +version = "0.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" }, +] + +[[package]] +name = "mlflow-skinny" +version = "3.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "click", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "cloudpickle", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "databricks-sdk", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "fastapi", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "gitpython", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "importlib-metadata", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or 
(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "opentelemetry-api", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "opentelemetry-proto", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "opentelemetry-sdk", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "protobuf", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pydantic", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "python-dotenv", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 
'extra-10-weathergen-gpu')" }, + { name = "requests", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "sqlparse", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "uvicorn", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d2/12/3143c5275531cc318146a1b36f0780991e899639551e5554d27573ba74be/mlflow_skinny-3.5.0.tar.gz", hash = "sha256:d9cf914ed6746a6097ef51d1a377a4c5c0f46aa174d3f89efbdc31feb2cf572b", size = 1925967, upload-time = "2025-10-16T14:04:13.777Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9a/bc/1e0c324bdd4e49d386625e6d5259a1352d8b4a39dc4af36b9dd474536843/mlflow_skinny-3.5.0-py3-none-any.whl", hash = "sha256:496cb9bf4e0d5b96082407a923e34636ea748ab928d35c288d1f19ec5493705e", size = 2311609, upload-time = "2025-10-16T14:04:12.142Z" }, +] + +[[package]] +name = "mpmath" +version = "1.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e0/47/dd32fa426cc72114383ac549964eecb20ecfd886d1e5ccf5340b55b02f57/mpmath-1.3.0.tar.gz", hash = "sha256:7a28eb2a9774d00c7bc92411c19a89209d5da7c4c9a9e227be8330a23a25b91f", size = 508106, upload-time = "2023-03-07T16:47:11.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl", hash = "sha256:a0b2b9fe80bbcd81a6647ff13108738cfb482d481d826cc0e02f5b35e5c88d2c", size = 536198, upload-time = "2023-03-07T16:47:09.197Z" }, +] + +[[package]] +name = "multiurl" +version = "0.3.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "python-dateutil", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytz", 
marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "requests", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tqdm", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7f/05/f6cb8e9506936c638550dda75e97c70535acf89eb147bfb8655e6d154256/multiurl-0.3.5.tar.gz", hash = "sha256:c2fb8b85227caa453fa0c9e711c5a83e3fd6d9a30b5010ce8a8a4e872d31211e", size = 18527, upload-time = "2025-03-20T11:43:01.426Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/b4/4dd3f8f8bdb79bf65a1882fbf769a27e3ce27e9566faa0aeaa295ed755d7/multiurl-0.3.5-py3-none-any.whl", hash = "sha256:37b920c3116861198ec5b24080fed5344514006021eec969784dabc76fcf3d63", size = 21323, upload-time = "2025-03-20T11:43:00.195Z" }, +] + +[[package]] +name = "narwhals" +version = "1.48.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9b/da/fe15ccd311ebb8fbbdacc447ba5888306c0b4a6253f628d60df351c36c7d/narwhals-1.48.1.tar.gz", hash = "sha256:b375cfdfc20b84b5ac0926f34c5c1373eb23ebea48d47bf75e282161cda63e34", size = 515882, upload-time = "2025-07-24T19:02:19.14Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cd/cf/411b2083991c6906634910ea0c5e5ea0a01f7f14da4194b39d7ad054c187/narwhals-1.48.1-py3-none-any.whl", hash = "sha256:76e3b069cf20a2746d8e227686b959530e98e8018c594a04e5f4f6f77e0872d9", size = 377332, upload-time = "2025-07-24T19:02:17.548Z" }, +] + +[[package]] +name = "nbformat" +version = "5.10.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fastjsonschema", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "jsonschema", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "jupyter-core", marker = "(platform_machine != 'aarch64' and platform_machine 
!= 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "traitlets", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6d/fd/91545e604bc3dad7dca9ed03284086039b294c6b3d75c0d2fa45f9e9caf3/nbformat-5.10.4.tar.gz", hash = "sha256:322168b14f937a5d11362988ecac2a4952d3d8e3a2cbeb2319584631226d5b3a", size = 142749, upload-time = "2024-04-04T11:20:37.371Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/82/0340caa499416c78e5d8f5f05947ae4bc3cba53c9f038ab6e9ed964e22f1/nbformat-5.10.4-py3-none-any.whl", hash = "sha256:3b48d6c8fbca4b299bf3982ea7db1af21580e4fec269ad087b9e81588891200b", size = 78454, upload-time = "2024-04-04T11:20:34.895Z" }, +] + +[[package]] +name = "nest-asyncio" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/83/f8/51569ac65d696c8ecbee95938f89d4abf00f47d58d48f6fbabfe8f0baefe/nest_asyncio-1.6.0.tar.gz", hash = "sha256:6f172d5449aca15afd6c646851f4e31e02c598d553a667e38cafa997cfec55fe", size = 7418, upload-time = "2024-01-21T14:25:19.227Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/c4/c2971a3ba4c6103a3d10c4b0f24f461ddc027f0f09763220cf35ca1401b3/nest_asyncio-1.6.0-py3-none-any.whl", hash = "sha256:87af6efd6b5e897c81050477ef65c62e2b2f35d51703cae01aff2905b1852e1c", size = 5195, upload-time = "2024-01-21T14:25:17.223Z" }, +] + +[[package]] +name = "netcdf4" +version = "1.7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "cftime", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/ed/4d27fcfa40ebfdad3d2088a3de7ee48dbff7f35163e815ec1870d2a7398c/netcdf4-1.7.2.tar.gz", 
hash = "sha256:a4c6375540b19989896136943abb6d44850ff6f1fa7d3f063253b1ad3f8b7fce", size = 835064, upload-time = "2024-10-22T19:01:25.521Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/7f/3a0f18a39efca0e093b54d634b66573c25ecab5c482d73138ae14aa55c6d/netCDF4-1.7.2-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:e73e3baa0b74afc414e53ff5095748fdbec7fb346eda351e567c23f2f0d247f1", size = 2952127, upload-time = "2024-10-22T19:00:50.613Z" }, + { url = "https://files.pythonhosted.org/packages/ed/c4/8aac0f8ca95a41bdf1364d34ff4e9bcc24494bfe69a1157301d884c2e392/netCDF4-1.7.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:a51da09258b31776f474c1d47e484fc7214914cdc59edf4cee789ba632184591", size = 2460781, upload-time = "2024-10-22T19:00:52.383Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1a/32b7427aaf62fed3d4e4456f874b25ce39373dbddf6cfde9edbcfc2417fc/netCDF4-1.7.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb95b11804fe051897d1f2044b05d82a1847bc2549631cdd2f655dde7de77a9c", size = 9377415, upload-time = "2024-10-22T19:00:54.412Z" }, + { url = "https://files.pythonhosted.org/packages/fd/bf/5e671495c8bdf6b628e091aa8980793579474a10e51bc6ba302a3af6a778/netCDF4-1.7.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9d8a848373723f41ef662590b4f5e1832227501c9fd4513e8ad8da58c269977", size = 9260579, upload-time = "2024-10-22T19:00:56.594Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/0a0bcdebcfaf72e96e7bcaa512f80ee096bf71945a3318d38253338e9c25/netCDF4-1.7.2-cp312-cp312-win_amd64.whl", hash = "sha256:568ea369e00b581302d77fc5fd0b8f78e520c7e08d0b5af5219ba51f3f1cd694", size = 6991523, upload-time = "2024-10-22T19:00:58.97Z" }, + { url = "https://files.pythonhosted.org/packages/84/0a/182bb4fe5639699ba39d558b553b8e6f04fbfea6cf78404c0f21ef149bf7/netcdf4-1.7.2-cp311-abi3-macosx_13_0_x86_64.whl", hash = "sha256:7e81c3c47f2772eab0b93fba8bb05b17b58dce17720e1bed25e9d76551deecd0", size = 2751391, upload-time = "2025-10-13T18:32:22.749Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1f/54ac27c791360f7452ca27ed1cb2917946bbe1ea4337c590a5abcef6332d/netcdf4-1.7.2-cp311-abi3-macosx_14_0_arm64.whl", hash = "sha256:cb2791dba37fc98fd1ac4e236c97822909f54efbcdf7f1415c9777810e0a28f4", size = 2387513, upload-time = "2025-10-13T18:32:27.499Z" }, + { url = "https://files.pythonhosted.org/packages/5c/5e/9bf3008a9e45c08f4c9fedce4d6f722ef5d970f56a9c5eb375a200dd2b66/netcdf4-1.7.2-cp311-abi3-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf11480f6b8a5b246818ffff6b4d90481e51f8b9555b41af0c372eb0aaf8b65f", size = 9621674, upload-time = "2025-10-13T18:32:29.193Z" }, + { url = "https://files.pythonhosted.org/packages/a1/75/46871e85f2bbfb1efe229623d25d7c9daa17e2e968d5235572b2c8bb53e8/netcdf4-1.7.2-cp311-abi3-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1ccc05328a8ff31921b539821791aeb20b054879f3fdf6d1d505bf6422824fec", size = 9453759, upload-time = "2025-10-13T18:32:31.136Z" }, + { url = "https://files.pythonhosted.org/packages/cd/10/c52f12297965938d9b9be666ea1f9d8340c2aea31d6909d90aa650847248/netcdf4-1.7.2-cp311-abi3-win_amd64.whl", hash = "sha256:999bfc4acebf400ed724d5e7329e2e768accc7ee1fa1d82d505da782f730301b", size = 7148514, upload-time = "2025-10-13T18:32:33.121Z" }, +] + +[[package]] +name = "networkx" +version = "3.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/6c/4f/ccdb8ad3a38e583f214547fd2f7ff1fc160c43a75af88e6aec213404b96a/networkx-3.5.tar.gz", hash = "sha256:d4c6f9cf81f52d69230866796b82afbccdec3db7ae4fbd1b65ea750feed50037", size = 2471065, upload-time = "2025-05-29T11:35:07.804Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/8d/776adee7bbf76365fdd7f2552710282c79a4ead5d2a46408c9043a2b70ba/networkx-3.5-py3-none-any.whl", hash = "sha256:0030d386a9a06dee3565298b4a734b68589749a544acbb6c412dc9e2489ec6ec", size = 2034406, upload-time = "2025-05-29T11:35:04.961Z" }, +] + +[[package]] +name = "numcodecs" +version = "0.15.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/fc/bb532969eb8236984ba65e4f0079a7da885b8ac0ce1f0835decbb3938a62/numcodecs-0.15.1.tar.gz", hash = "sha256:eeed77e4d6636641a2cc605fbc6078c7a8f2cc40f3dfa2b3f61e52e6091b04ff", size = 6267275, upload-time = "2025-02-10T10:23:33.254Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/7e/f12fc32d3beedc6a8f1ec69ea0ba72e93cb99c0350feed2cff5d04679bc3/numcodecs-0.15.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b0a9d9cd29a0088220682dda4a9898321f7813ff7802be2bbb545f6e3d2f10ff", size = 1691889, upload-time = "2025-02-10T10:23:12.934Z" }, + { url = "https://files.pythonhosted.org/packages/81/38/88e40d40288b73c3b3a390ed5614a34b0661d00255bdd4cfb91c32101364/numcodecs-0.15.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a34f0fe5e5f3b837bbedbeb98794a6d4a12eeeef8d4697b523905837900b5e1c", size = 1189149, upload-time = "2025-02-10T10:23:15.803Z" }, + { url = "https://files.pythonhosted.org/packages/28/7d/7527d9180bc76011d6163c848c9cf02cd28a623c2c66cf543e1e86de7c5e/numcodecs-0.15.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3a09e22140f2c691f7df26303ff8fa2dadcf26d7d0828398c0bc09b69e5efa3", size = 8879163, upload-time = "2025-02-10T10:23:18.582Z" }, + { url = "https://files.pythonhosted.org/packages/ab/bc/b6c3cde91c754860a3467a8c058dcf0b1a5ca14d82b1c5397c700cf8b1eb/numcodecs-0.15.1-cp312-cp312-win_amd64.whl", hash = "sha256:daed6066ffcf40082da847d318b5ab6123d69ceb433ba603cb87c323a541a8bc", size = 836785, upload-time = "2025-02-10T10:23:22.314Z" }, +] + +[[package]] +name = "numexpr" +version = "2.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' 
and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d2/8f/2cc977e91adbfbcdb6b49fdb9147e1d1c7566eb2c0c1e737e9a47020b5ca/numexpr-2.11.0.tar.gz", hash = "sha256:75b2c01a4eda2e7c357bc67a3f5c3dd76506c15b5fd4dc42845ef2e182181bad", size = 108960, upload-time = "2025-06-09T11:05:56.79Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/38/45/7a0e5a0b800d92e73825494ac695fa05a52c7fc7088d69a336880136b437/numexpr-2.11.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4229060be866813122385c608bbd3ea48fe0b33e91f2756810d28c1cdbfc98f1", size = 147494, upload-time = "2025-06-09T11:05:17.015Z" }, + { url = "https://files.pythonhosted.org/packages/74/46/3a26b84e44f4739ec98de0ede4b95b4b8096f721e22d0e97517eeb02017e/numexpr-2.11.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:097aa8835d32d6ac52f2be543384019b4b134d1fb67998cbfc4271155edfe54a", size = 136832, upload-time = "2025-06-09T11:05:18.55Z" }, + { url = "https://files.pythonhosted.org/packages/75/05/e3076ff25d4a108b47640c169c0a64811748c43b63d9cc052ea56de1631e/numexpr-2.11.0-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f082321c244ff5d0e252071fb2c4fe02063a45934144a1456a5370ca139bec2", size = 412618, upload-time = "2025-06-09T11:05:20.093Z" }, + { url = "https://files.pythonhosted.org/packages/70/e8/15e0e077a004db0edd530da96c60c948689c888c464ee5d14b82405ebd86/numexpr-2.11.0-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d7a19435ca3d7dd502b8d8dce643555eb1b6013989e3f7577857289f6db6be16", size = 403363, upload-time = "2025-06-09T11:05:21.217Z" }, + { url = "https://files.pythonhosted.org/packages/10/14/f22afb3a7ae41d03ba87f62d00fbcfb76389f9cc91b7a82593c39c509318/numexpr-2.11.0-cp312-cp312-win32.whl", hash = "sha256:f326218262c8d8537887cc4bbd613c8409d62f2cac799835c0360e0d9cefaa5c", size = 153307, upload-time = "2025-06-09T11:05:22.855Z" }, + { url = "https://files.pythonhosted.org/packages/18/70/abc585269424582b3cd6db261e33b2ec96b5d4971da3edb29fc9b62a8926/numexpr-2.11.0-cp312-cp312-win_amd64.whl", hash = "sha256:0a184e5930c77ab91dd9beee4df403b825cd9dfc4e9ba4670d31c9fcb4e2c08e", size = 146337, upload-time = "2025-06-09T11:05:23.976Z" }, +] + +[[package]] +name = "numpy" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f3/db/8e12381333aea300890829a0a36bfa738cac95475d88982d538725143fd9/numpy-2.3.0.tar.gz", hash = "sha256:581f87f9e9e9db2cba2141400e160e9dd644ee248788d6f90636eeb8fd9260a6", size = 20382813, upload-time = "2025-06-07T14:54:32.608Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/59/9df493df81ac6f76e9f05cdbe013cdb0c9a37b434f6e594f5bd25e278908/numpy-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:389b85335838155a9076e9ad7f8fdba0827496ec2d2dc32ce69ce7898bde03ba", size = 20897025, upload-time = "2025-06-07T14:40:33.558Z" }, + { url = "https://files.pythonhosted.org/packages/2f/86/4ff04335901d6cf3a6bb9c748b0097546ae5af35e455ae9b962ebff4ecd7/numpy-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9498f60cd6bb8238d8eaf468a3d5bb031d34cd12556af53510f05fcf581c1b7e", size = 14129882, upload-time = "2025-06-07T14:40:55.034Z" }, + { url = "https://files.pythonhosted.org/packages/71/8d/a942cd4f959de7f08a79ab0c7e6cecb7431d5403dce78959a726f0f57aa1/numpy-2.3.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:622a65d40d8eb427d8e722fd410ac3ad4958002f109230bc714fa551044ebae2", 
size = 5110181, upload-time = "2025-06-07T14:41:04.4Z" }, + { url = "https://files.pythonhosted.org/packages/86/5d/45850982efc7b2c839c5626fb67fbbc520d5b0d7c1ba1ae3651f2f74c296/numpy-2.3.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:b9446d9d8505aadadb686d51d838f2b6688c9e85636a0c3abaeb55ed54756459", size = 6647581, upload-time = "2025-06-07T14:41:14.695Z" }, + { url = "https://files.pythonhosted.org/packages/1a/c0/c871d4a83f93b00373d3eebe4b01525eee8ef10b623a335ec262b58f4dc1/numpy-2.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:50080245365d75137a2bf46151e975de63146ae6d79f7e6bd5c0e85c9931d06a", size = 14262317, upload-time = "2025-06-07T14:41:35.862Z" }, + { url = "https://files.pythonhosted.org/packages/b7/f6/bc47f5fa666d5ff4145254f9e618d56e6a4ef9b874654ca74c19113bb538/numpy-2.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c24bb4113c66936eeaa0dc1e47c74770453d34f46ee07ae4efd853a2ed1ad10a", size = 16633919, upload-time = "2025-06-07T14:42:00.622Z" }, + { url = "https://files.pythonhosted.org/packages/f5/b4/65f48009ca0c9b76df5f404fccdea5a985a1bb2e34e97f21a17d9ad1a4ba/numpy-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4d8d294287fdf685281e671886c6dcdf0291a7c19db3e5cb4178d07ccf6ecc67", size = 15567651, upload-time = "2025-06-07T14:42:24.429Z" }, + { url = "https://files.pythonhosted.org/packages/f1/62/5367855a2018578e9334ed08252ef67cc302e53edc869666f71641cad40b/numpy-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6295f81f093b7f5769d1728a6bd8bf7466de2adfa771ede944ce6711382b89dc", size = 18361723, upload-time = "2025-06-07T14:42:51.167Z" }, + { url = "https://files.pythonhosted.org/packages/d4/75/5baed8cd867eabee8aad1e74d7197d73971d6a3d40c821f1848b8fab8b84/numpy-2.3.0-cp312-cp312-win32.whl", hash = "sha256:e6648078bdd974ef5d15cecc31b0c410e2e24178a6e10bf511e0557eed0f2570", size = 6318285, upload-time = "2025-06-07T14:43:02.052Z" }, + { url = "https://files.pythonhosted.org/packages/bc/49/d5781eaa1a15acb3b3a3f49dc9e2ff18d92d0ce5c2976f4ab5c0a7360250/numpy-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:0898c67a58cdaaf29994bc0e2c65230fd4de0ac40afaf1584ed0b02cd74c6fdd", size = 12732594, upload-time = "2025-06-07T14:43:21.071Z" }, + { url = "https://files.pythonhosted.org/packages/c2/1c/6d343e030815c7c97a1f9fbad00211b47717c7fe446834c224bd5311e6f1/numpy-2.3.0-cp312-cp312-win_arm64.whl", hash = "sha256:bd8df082b6c4695753ad6193018c05aac465d634834dca47a3ae06d4bb22d9ea", size = 9891498, upload-time = "2025-06-07T14:43:36.332Z" }, +] + +[[package]] +name = "nvidia-cublas-cu12" +version = "12.6.4.1" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/af/eb/ff4b8c503fa1f1796679dce648854d58751982426e4e4b37d6fce49d259c/nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:08ed2686e9875d01b58e3cb379c6896df8e76c75e0d4a7f7dace3d7b6d9ef8eb", size = 393138322, upload-time = "2024-11-20T17:40:25.65Z" }, + { url = "https://files.pythonhosted.org/packages/97/0d/f1f0cadbf69d5b9ef2e4f744c9466cb0a850741d08350736dfdb4aa89569/nvidia_cublas_cu12-12.6.4.1-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:235f728d6e2a409eddf1df58d5b0921cf80cfa9e72b9f2775ccb7b4a87984668", size = 390794615, upload-time = "2024-11-20T17:39:52.715Z" }, + { url = "https://files.pythonhosted.org/packages/84/f7/985e9bdbe3e0ac9298fcc8cfa51a392862a46a0ffaccbbd56939b62a9c83/nvidia_cublas_cu12-12.6.4.1-py3-none-win_amd64.whl", hash = 
"sha256:9e4fa264f4d8a4eb0cdbd34beadc029f453b3bafae02401e999cf3d5a5af75f8", size = 434535301, upload-time = "2024-11-20T17:50:41.681Z" }, +] + +[[package]] +name = "nvidia-cuda-cupti-cu12" +version = "12.6.80" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/8b/2f6230cb715646c3a9425636e513227ce5c93c4d65823a734f4bb86d43c3/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:166ee35a3ff1587f2490364f90eeeb8da06cd867bd5b701bf7f9a02b78bc63fc", size = 8236764, upload-time = "2024-11-20T17:35:41.03Z" }, + { url = "https://files.pythonhosted.org/packages/25/0f/acb326ac8fd26e13c799e0b4f3b2751543e1834f04d62e729485872198d4/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_aarch64.whl", hash = "sha256:358b4a1d35370353d52e12f0a7d1769fc01ff74a191689d3870b2123156184c4", size = 8236756, upload-time = "2024-10-01T16:57:45.507Z" }, + { url = "https://files.pythonhosted.org/packages/49/60/7b6497946d74bcf1de852a21824d63baad12cd417db4195fc1bfe59db953/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:6768bad6cab4f19e8292125e5f1ac8aa7d1718704012a0e3272a6f61c4bce132", size = 8917980, upload-time = "2024-11-20T17:36:04.019Z" }, + { url = "https://files.pythonhosted.org/packages/a5/24/120ee57b218d9952c379d1e026c4479c9ece9997a4fb46303611ee48f038/nvidia_cuda_cupti_cu12-12.6.80-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a3eff6cdfcc6a4c35db968a06fcadb061cbc7d6dde548609a941ff8701b98b73", size = 8917972, upload-time = "2024-10-01T16:58:06.036Z" }, + { url = "https://files.pythonhosted.org/packages/1c/81/7796f096afaf726796b1b648f3bc80cafc61fe7f77f44a483c89e6c5ef34/nvidia_cuda_cupti_cu12-12.6.80-py3-none-win_amd64.whl", hash = "sha256:bbe6ae76e83ce5251b56e8c8e61a964f757175682bbad058b170b136266ab00a", size = 5724175, upload-time = "2024-10-01T17:09:47.955Z" }, +] + +[[package]] +name = "nvidia-cuda-nvrtc-cu12" +version = "12.6.77" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/2f/72df534873235983cc0a5371c3661bebef7c4682760c275590b972c7b0f9/nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5847f1d6e5b757f1d2b3991a01082a44aad6f10ab3c5c0213fa3e25bddc25a13", size = 23162955, upload-time = "2024-10-01T16:59:50.922Z" }, + { url = "https://files.pythonhosted.org/packages/75/2e/46030320b5a80661e88039f59060d1790298b4718944a65a7f2aeda3d9e9/nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:35b0cc6ee3a9636d5409133e79273ce1f3fd087abb0532d2d2e8fff1fe9efc53", size = 23650380, upload-time = "2024-10-01T17:00:14.643Z" }, + { url = "https://files.pythonhosted.org/packages/f5/46/d3a1cdda8bb113c80f43a0a6f3a853356d487b830f3483f92d49ce87fa55/nvidia_cuda_nvrtc_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:f7007dbd914c56bd80ea31bc43e8e149da38f68158f423ba845fc3292684e45a", size = 39026742, upload-time = "2024-10-01T17:10:49.058Z" }, +] + +[[package]] +name = "nvidia-cuda-runtime-cu12" +version = "12.6.77" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8f/ea/590b2ac00d772a8abd1c387a92b46486d2679ca6622fd25c18ff76265663/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:6116fad3e049e04791c0256a9778c16237837c08b27ed8c8401e2e45de8d60cd", size = 908052, upload-time = "2024-11-20T17:35:19.905Z" }, + { url = 
"https://files.pythonhosted.org/packages/b7/3d/159023799677126e20c8fd580cca09eeb28d5c5a624adc7f793b9aa8bbfa/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d461264ecb429c84c8879a7153499ddc7b19b5f8d84c204307491989a365588e", size = 908040, upload-time = "2024-10-01T16:57:22.221Z" }, + { url = "https://files.pythonhosted.org/packages/e1/23/e717c5ac26d26cf39a27fbc076240fad2e3b817e5889d671b67f4f9f49c5/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ba3b56a4f896141e25e19ab287cd71e52a6a0f4b29d0d31609f60e3b4d5219b7", size = 897690, upload-time = "2024-11-20T17:35:30.697Z" }, + { url = "https://files.pythonhosted.org/packages/f0/62/65c05e161eeddbafeca24dc461f47de550d9fa8a7e04eb213e32b55cfd99/nvidia_cuda_runtime_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:a84d15d5e1da416dd4774cb42edf5e954a3e60cc945698dc1d5be02321c44dc8", size = 897678, upload-time = "2024-10-01T16:57:33.821Z" }, + { url = "https://files.pythonhosted.org/packages/fa/76/4c80fa138333cc975743fd0687a745fccb30d167f906f13c1c7f9a85e5ea/nvidia_cuda_runtime_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:86c58044c824bf3c173c49a2dbc7a6c8b53cb4e4dca50068be0bf64e9dab3f7f", size = 891773, upload-time = "2024-10-01T17:09:26.362Z" }, +] + +[[package]] +name = "nvidia-cudnn-cu12" +version = "9.5.1.17" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/93/a201a12d3ec1caa8c6ac34c1c2f9eeb696b886f0c36ff23c638b46603bd0/nvidia_cudnn_cu12-9.5.1.17-py3-none-manylinux_2_28_aarch64.whl", hash = "sha256:9fd4584468533c61873e5fda8ca41bac3a38bcb2d12350830c69b0a96a7e4def", size = 570523509, upload-time = "2024-10-25T19:53:03.148Z" }, + { url = "https://files.pythonhosted.org/packages/2a/78/4535c9c7f859a64781e43c969a3a7e84c54634e319a996d43ef32ce46f83/nvidia_cudnn_cu12-9.5.1.17-py3-none-manylinux_2_28_x86_64.whl", hash = "sha256:30ac3869f6db17d170e0e556dd6cc5eee02647abc31ca856634d5a40f82c15b2", size = 570988386, upload-time = "2024-10-25T19:54:26.39Z" }, + { url = "https://files.pythonhosted.org/packages/b6/b2/3f60d15f037fa5419d9d7f788b100ef33ea913ae5315c87ca6d6fa606c35/nvidia_cudnn_cu12-9.5.1.17-py3-none-win_amd64.whl", hash = "sha256:d7af0f8a4f3b4b9dbb3122f2ef553b45694ed9c384d5a75bab197b8eefb79ab8", size = 565440743, upload-time = "2024-10-25T19:55:49.74Z" }, +] + +[[package]] +name = "nvidia-cufft-cu12" +version = "11.3.0.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/1f/37/c50d2b2f2c07e146776389e3080f4faf70bcc4fa6e19d65bb54ca174ebc3/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d16079550df460376455cba121db6564089176d9bac9e4f360493ca4741b22a6", size = 200164144, upload-time = "2024-11-20T17:40:58.288Z" }, + { url = "https://files.pythonhosted.org/packages/ce/f5/188566814b7339e893f8d210d3a5332352b1409815908dad6a363dcceac1/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8510990de9f96c803a051822618d42bf6cb8f069ff3f48d93a8486efdacb48fb", size = 200164135, upload-time = "2024-10-01T17:03:24.212Z" }, + { url = 
"https://files.pythonhosted.org/packages/8f/16/73727675941ab8e6ffd86ca3a4b7b47065edcca7a997920b831f8147c99d/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ccba62eb9cef5559abd5e0d54ceed2d9934030f51163df018532142a8ec533e5", size = 200221632, upload-time = "2024-11-20T17:41:32.357Z" }, + { url = "https://files.pythonhosted.org/packages/60/de/99ec247a07ea40c969d904fc14f3a356b3e2a704121675b75c366b694ee1/nvidia_cufft_cu12-11.3.0.4-py3-none-manylinux2014_x86_64.whl", hash = "sha256:768160ac89f6f7b459bee747e8d175dbf53619cfe74b2a5636264163138013ca", size = 200221622, upload-time = "2024-10-01T17:03:58.79Z" }, + { url = "https://files.pythonhosted.org/packages/b4/38/36fd800cec8f6e89b7c1576edaaf8076e69ec631644cdbc1b5f2e2b5a9df/nvidia_cufft_cu12-11.3.0.4-py3-none-win_amd64.whl", hash = "sha256:6048ebddfb90d09d2707efb1fd78d4e3a77cb3ae4dc60e19aab6be0ece2ae464", size = 199356881, upload-time = "2024-10-01T17:13:01.861Z" }, +] + +[[package]] +name = "nvidia-curand-cu12" +version = "10.3.7.77" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/42/ac/36543605358a355632f1a6faa3e2d5dfb91eab1e4bc7d552040e0383c335/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:6e82df077060ea28e37f48a3ec442a8f47690c7499bff392a5938614b56c98d8", size = 56289881, upload-time = "2024-10-01T17:04:18.981Z" }, + { url = "https://files.pythonhosted.org/packages/73/1b/44a01c4e70933637c93e6e1a8063d1e998b50213a6b65ac5a9169c47e98e/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:a42cd1344297f70b9e39a1e4f467a4e1c10f1da54ff7a85c12197f6c652c8bdf", size = 56279010, upload-time = "2024-11-20T17:42:50.958Z" }, + { url = "https://files.pythonhosted.org/packages/4a/aa/2c7ff0b5ee02eaef890c0ce7d4f74bc30901871c5e45dee1ae6d0083cd80/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:99f1a32f1ac2bd134897fc7a203f779303261268a65762a623bf30cc9fe79117", size = 56279000, upload-time = "2024-10-01T17:04:45.274Z" }, + { url = "https://files.pythonhosted.org/packages/a6/02/5362a9396f23f7de1dd8a64369e87c85ffff8216fc8194ace0fa45ba27a5/nvidia_curand_cu12-10.3.7.77-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:7b2ed8e95595c3591d984ea3603dd66fe6ce6812b886d59049988a712ed06b6e", size = 56289882, upload-time = "2024-11-20T17:42:25.222Z" }, + { url = "https://files.pythonhosted.org/packages/a9/a8/0cd0cec757bd4b4b4ef150fca62ec064db7d08a291dced835a0be7d2c147/nvidia_curand_cu12-10.3.7.77-py3-none-win_amd64.whl", hash = "sha256:6d6d935ffba0f3d439b7cd968192ff068fafd9018dbf1b85b37261b13cfc9905", size = 55783873, upload-time = "2024-10-01T17:13:30.377Z" }, +] + +[[package]] +name = "nvidia-cusolver-cu12" +version = "11.7.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/17/dbe1aa865e4fdc7b6d4d0dd308fdd5aaab60f939abfc0ea1954eac4fb113/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:0ce237ef60acde1efc457335a2ddadfd7610b892d94efee7b776c64bb1cac9e0", size = 157833628, upload-time = "2024-10-01T17:05:05.591Z" }, + 
{ url = "https://files.pythonhosted.org/packages/f0/6e/c2cf12c9ff8b872e92b4a5740701e51ff17689c4d726fca91875b07f655d/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e9e49843a7707e42022babb9bcfa33c29857a93b88020c4e4434656a655b698c", size = 158229790, upload-time = "2024-11-20T17:43:43.211Z" }, + { url = "https://files.pythonhosted.org/packages/9f/81/baba53585da791d043c10084cf9553e074548408e04ae884cfe9193bd484/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6cf28f17f64107a0c4d7802be5ff5537b2130bfc112f25d5a30df227058ca0e6", size = 158229780, upload-time = "2024-10-01T17:05:39.875Z" }, + { url = "https://files.pythonhosted.org/packages/7c/5f/07d0ba3b7f19be5a5ec32a8679fc9384cfd9fc6c869825e93be9f28d6690/nvidia_cusolver_cu12-11.7.1.2-py3-none-manylinux_2_27_aarch64.whl", hash = "sha256:dbbe4fc38ec1289c7e5230e16248365e375c3673c9c8bac5796e2e20db07f56e", size = 157833630, upload-time = "2024-11-20T17:43:16.77Z" }, + { url = "https://files.pythonhosted.org/packages/d4/53/fff50a0808df7113d77e3bbc7c2b7eaed6f57d5eb80fbe93ead2aea1e09a/nvidia_cusolver_cu12-11.7.1.2-py3-none-win_amd64.whl", hash = "sha256:6813f9d8073f555444a8705f3ab0296d3e1cb37a16d694c5fc8b862a0d8706d7", size = 149287877, upload-time = "2024-10-01T17:13:49.804Z" }, +] + +[[package]] +name = "nvidia-cusparse-cu12" +version = "12.5.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/eb/6681efd0aa7df96b4f8067b3ce7246833dd36830bb4cec8896182773db7d/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d25b62fb18751758fe3c93a4a08eff08effedfe4edf1c6bb5afd0890fe88f887", size = 216451147, upload-time = "2024-11-20T17:44:18.055Z" }, + { url = "https://files.pythonhosted.org/packages/d3/56/3af21e43014eb40134dea004e8d0f1ef19d9596a39e4d497d5a7de01669f/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7aa32fa5470cf754f72d1116c7cbc300b4e638d3ae5304cfa4a638a5b87161b1", size = 216451135, upload-time = "2024-10-01T17:06:03.826Z" }, + { url = "https://files.pythonhosted.org/packages/06/1e/b8b7c2f4099a37b96af5c9bb158632ea9e5d9d27d7391d7eb8fc45236674/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7556d9eca156e18184b94947ade0fba5bb47d69cec46bf8660fd2c71a4b48b73", size = 216561367, upload-time = "2024-11-20T17:44:54.824Z" }, + { url = "https://files.pythonhosted.org/packages/43/ac/64c4316ba163e8217a99680c7605f779accffc6a4bcd0c778c12948d3707/nvidia_cusparse_cu12-12.5.4.2-py3-none-manylinux2014_x86_64.whl", hash = "sha256:23749a6571191a215cb74d1cdbff4a86e7b19f1200c071b3fcf844a5bea23a2f", size = 216561357, upload-time = "2024-10-01T17:06:29.861Z" }, + { url = "https://files.pythonhosted.org/packages/45/ef/876ad8e4260e1128e6d4aac803d9d51baf3791ebdb4a9b8d9b8db032b4b0/nvidia_cusparse_cu12-12.5.4.2-py3-none-win_amd64.whl", hash = "sha256:4acb8c08855a26d737398cba8fb6f8f5045d93f82612b4cfd84645a2332ccf20", size = 213712630, upload-time = "2024-10-01T17:14:23.779Z" }, +] + +[[package]] +name = "nvidia-cusparselt-cu12" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/62/da/4de092c61c6dea1fc9c936e69308a02531d122e12f1f649825934ad651b5/nvidia_cusparselt_cu12-0.6.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:8371549623ba601a06322af2133c4a44350575f5a3108fb75f3ef20b822ad5f1", size = 156402859, upload-time = "2024-10-16T02:23:17.184Z" }, + { url = "https://files.pythonhosted.org/packages/3b/9a/72ef35b399b0e183bc2e8f6f558036922d453c4d8237dab26c666a04244b/nvidia_cusparselt_cu12-0.6.3-py3-none-manylinux2014_x86_64.whl", hash = "sha256:e5c8a26c36445dd2e6812f1177978a24e2d37cacce7e090f297a688d1ec44f46", size = 156785796, upload-time = "2024-10-15T21:29:17.709Z" }, + { url = "https://files.pythonhosted.org/packages/46/3e/9e1e394a02a06f694be2c97bbe47288bb7c90ea84c7e9cf88f7b28afe165/nvidia_cusparselt_cu12-0.6.3-py3-none-win_amd64.whl", hash = "sha256:3b325bcbd9b754ba43df5a311488fca11a6b5dc3d11df4d190c000cf1a0765c7", size = 155595972, upload-time = "2024-10-15T22:58:35.426Z" }, +] + +[[package]] +name = "nvidia-ml-py" +version = "12.575.51" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d2/4d/6f017814ed5ac28e08e1b8a62e3a258957da27582c89b7f8f8b15ac3d2e7/nvidia_ml_py-12.575.51.tar.gz", hash = "sha256:6490e93fea99eb4e966327ae18c6eec6256194c921f23459c8767aee28c54581", size = 46597, upload-time = "2025-05-06T20:46:37.962Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/24/552ebea28f0570b9e65e62b50287a273804c9f997cc1c2dcd4e2d64b9e7d/nvidia_ml_py-12.575.51-py3-none-any.whl", hash = "sha256:eb8641800d98ce40a22f479873f34b482e214a7e80349c63be51c3919845446e", size = 47547, upload-time = "2025-05-06T20:46:36.457Z" }, +] + +[[package]] +name = "nvidia-nccl-cu12" +version = "2.21.5" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/df/99/12cd266d6233f47d00daf3a72739872bdc10267d0383508b0b9c84a18bb6/nvidia_nccl_cu12-2.21.5-py3-none-manylinux2014_x86_64.whl", hash = "sha256:8579076d30a8c24988834445f8d633c697d42397e92ffc3f63fa26766d25e0a0", size = 188654414, upload-time = "2024-04-03T15:32:57.427Z" }, +] + +[[package]] +name = "nvidia-nvjitlink-cu12" +version = "12.6.85" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9d/d7/c5383e47c7e9bf1c99d5bd2a8c935af2b6d705ad831a7ec5c97db4d82f4f/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2010_x86_64.manylinux_2_12_x86_64.whl", hash = "sha256:eedc36df9e88b682efe4309aa16b5b4e78c2407eac59e8c10a6a47535164369a", size = 19744971, upload-time = "2024-11-20T17:46:53.366Z" }, + { url = "https://files.pythonhosted.org/packages/31/db/dc71113d441f208cdfe7ae10d4983884e13f464a6252450693365e166dcf/nvidia_nvjitlink_cu12-12.6.85-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cf4eaa7d4b6b543ffd69d6abfb11efdeb2db48270d94dfd3a452c24150829e41", size = 19270338, upload-time = "2024-11-20T17:46:29.758Z" }, + { url = "https://files.pythonhosted.org/packages/89/76/93c1467b1387387440a4d25102d86b7794535449b689f8e2dc22c1c8ff7f/nvidia_nvjitlink_cu12-12.6.85-py3-none-win_amd64.whl", hash = "sha256:e61120e52ed675747825cdd16febc6a0730537451d867ee58bee3853b1b13d1c", size = 161908572, upload-time = "2024-11-20T17:52:40.124Z" }, +] + +[[package]] +name = "nvidia-nvtx-cu12" +version = "12.6.77" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/b9/93/80f8a520375af9d7ee44571a6544653a176e53c2b8ccce85b97b83c2491b/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:f44f8d86bb7d5629988d61c8d3ae61dddb2015dee142740536bc7481b022fe4b", size = 90549, upload-time = "2024-11-20T17:38:17.387Z" }, + { url = "https://files.pythonhosted.org/packages/2b/53/36e2fd6c7068997169b49ffc8c12d5af5e5ff209df6e1a2c4d373b3a638f/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_aarch64.whl", hash = "sha256:adcaabb9d436c9761fca2b13959a2d237c5f9fd406c8e4b723c695409ff88059", size = 90539, upload-time = "2024-10-01T17:00:27.179Z" }, + { url = "https://files.pythonhosted.org/packages/56/9a/fff8376f8e3d084cd1530e1ef7b879bb7d6d265620c95c1b322725c694f4/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:b90bed3df379fa79afbd21be8e04a0314336b8ae16768b58f2d34cb1d04cd7d2", size = 89276, upload-time = "2024-11-20T17:38:27.621Z" }, + { url = "https://files.pythonhosted.org/packages/9e/4e/0d0c945463719429b7bd21dece907ad0bde437a2ff12b9b12fee94722ab0/nvidia_nvtx_cu12-12.6.77-py3-none-manylinux2014_x86_64.whl", hash = "sha256:6574241a3ec5fdc9334353ab8c479fe75841dbe8f4532a8fc97ce63503330ba1", size = 89265, upload-time = "2024-10-01T17:00:38.172Z" }, + { url = "https://files.pythonhosted.org/packages/f7/cd/98a447919d4ed14d407ac82b14b0a0c9c1dbfe81099934b1fc3bfd1e6316/nvidia_nvtx_cu12-12.6.77-py3-none-win_amd64.whl", hash = "sha256:2fb11a4af04a5e6c84073e6404d26588a34afd35379f0855a99797897efa75c0", size = 56434, upload-time = "2024-10-01T17:11:13.124Z" }, +] + +[[package]] +name = "nvsmi" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9d/13/c5da04d29f4e5f830a8558601b3e179163d0d94e0da06529d5a8e62eed9e/nvsmi-0.4.2.tar.gz", hash = "sha256:c1a391c7c4dadc6ec572909ff0372451d464ebadc144e5aa5fbbcc893dcb7bfa", size = 5248, upload-time = "2020-02-28T09:32:05.357Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/d5/6ec6d6410b434463ba76900d2363a1f75c474f3442a4365557b2588fa14b/nvsmi-0.4.2-py3-none-any.whl", hash = "sha256:718894c24bdf7b58b8ecdfd282dceb06ef120a4b4e0b8517193cba876174945e", size = 5466, upload-time = "2020-02-28T09:32:03.88Z" }, +] + +[[package]] +name = "omegaconf" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "antlr4-python3-runtime", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/09/48/6388f1bb9da707110532cb70ec4d2822858ddfb44f1cdf1233c20a80ea4b/omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7", size = 3298120, upload-time = 
"2022-12-08T20:59:22.753Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/94/1843518e420fa3ed6919835845df698c7e27e183cb997394e4a670973a65/omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b", size = 79500, upload-time = "2022-12-08T20:59:19.686Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/08/d8/0f354c375628e048bd0570645b310797299754730079853095bf000fba69/opentelemetry_api-1.38.0.tar.gz", hash = "sha256:f4c193b5e8acb0912b06ac5b16321908dd0843d75049c091487322284a3eea12", size = 65242, upload-time = "2025-10-16T08:35:50.25Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/a2/d86e01c28300bd41bab8f18afd613676e2bd63515417b77636fc1add426f/opentelemetry_api-1.38.0-py3-none-any.whl", hash = "sha256:2891b0197f47124454ab9f0cf58f3be33faca394457ac3e09daba13ff50aa582", size = 65947, upload-time = "2025-10-16T08:35:30.23Z" }, +] + +[[package]] +name = "opentelemetry-proto" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "protobuf", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/51/14/f0c4f0f6371b9cb7f9fa9ee8918bfd59ac7040c7791f1e6da32a1839780d/opentelemetry_proto-1.38.0.tar.gz", hash = "sha256:88b161e89d9d372ce723da289b7da74c3a8354a8e5359992be813942969ed468", size = 46152, upload-time = "2025-10-16T08:36:01.612Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b6/6a/82b68b14efca5150b2632f3692d627afa76b77378c4999f2648979409528/opentelemetry_proto-1.38.0-py3-none-any.whl", hash = "sha256:b6ebe54d3217c42e45462e2a1ae28c3e2bf2ec5a5645236a490f55f45f1a0a18", size = 72535, upload-time = "2025-10-16T08:35:45.749Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform 
!= 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "opentelemetry-semantic-conventions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/85/cb/f0eee1445161faf4c9af3ba7b848cc22a50a3d3e2515051ad8628c35ff80/opentelemetry_sdk-1.38.0.tar.gz", hash = "sha256:93df5d4d871ed09cb4272305be4d996236eedb232253e3ab864c8620f051cebe", size = 171942, upload-time = "2025-10-16T08:36:02.257Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2f/2e/e93777a95d7d9c40d270a371392b6d6f1ff170c2a3cb32d6176741b5b723/opentelemetry_sdk-1.38.0-py3-none-any.whl", hash = "sha256:1c66af6564ecc1553d72d811a01df063ff097cdc82ce188da9951f93b8d10f6b", size = 132349, upload-time = "2025-10-16T08:35:46.995Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.59b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/40/bc/8b9ad3802cd8ac6583a4eb7de7e5d7db004e89cb7efe7008f9c8a537ee75/opentelemetry_semantic_conventions-0.59b0.tar.gz", hash = "sha256:7a6db3f30d70202d5bf9fa4b69bc866ca6a30437287de6c510fb594878aed6b0", size = 129861, upload-time = "2025-10-16T08:36:03.346Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/24/7d/c88d7b15ba8fe5c6b8f93be50fc11795e9fc05386c44afaf6b76fe191f9b/opentelemetry_semantic_conventions-0.59b0-py3-none-any.whl", hash = "sha256:35d3b8833ef97d614136e253c1da9342b4c3c083bbaf29ce31d572a1c3825eed", size = 207954, upload-time = "2025-10-16T08:35:48.054Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", 
size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pandas" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "python-dateutil", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytz", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tzdata", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/51/48f713c4c728d7c55ef7444ba5ea027c26998d96d1a40953b346438602fc/pandas-2.3.0.tar.gz", hash = "sha256:34600ab34ebf1131a7613a260a61dbe8b62c188ec0ea4c296da7c9a06b004133", size = 4484490, upload-time = "2025-06-05T03:27:54.133Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/46/24192607058dd607dbfacdd060a2370f6afb19c2ccb617406469b9aeb8e7/pandas-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2eb4728a18dcd2908c7fccf74a982e241b467d178724545a48d0caf534b38ebf", size = 11573865, upload-time = "2025-06-05T03:26:46.774Z" }, + { url = "https://files.pythonhosted.org/packages/9f/cc/ae8ea3b800757a70c9fdccc68b67dc0280a6e814efcf74e4211fd5dea1ca/pandas-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9d8c3187be7479ea5c3d30c32a5d73d62a621166675063b2edd21bc47614027", size = 10702154, upload-time = "2025-06-05T16:50:14.439Z" }, + { url = "https://files.pythonhosted.org/packages/d8/ba/a7883d7aab3d24c6540a2768f679e7414582cc389876d469b40ec749d78b/pandas-2.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ff730713d4c4f2f1c860e36c005c7cefc1c7c80c21c0688fd605aa43c9fcf09", size = 11262180, upload-time = "2025-06-05T16:50:17.453Z" }, + { url = 
"https://files.pythonhosted.org/packages/01/a5/931fc3ad333d9d87b10107d948d757d67ebcfc33b1988d5faccc39c6845c/pandas-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba24af48643b12ffe49b27065d3babd52702d95ab70f50e1b34f71ca703e2c0d", size = 11991493, upload-time = "2025-06-05T03:26:51.813Z" }, + { url = "https://files.pythonhosted.org/packages/d7/bf/0213986830a92d44d55153c1d69b509431a972eb73f204242988c4e66e86/pandas-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:404d681c698e3c8a40a61d0cd9412cc7364ab9a9cc6e144ae2992e11a2e77a20", size = 12470733, upload-time = "2025-06-06T00:00:18.651Z" }, + { url = "https://files.pythonhosted.org/packages/a4/0e/21eb48a3a34a7d4bac982afc2c4eb5ab09f2d988bdf29d92ba9ae8e90a79/pandas-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6021910b086b3ca756755e86ddc64e0ddafd5e58e076c72cb1585162e5ad259b", size = 13212406, upload-time = "2025-06-05T03:26:55.992Z" }, + { url = "https://files.pythonhosted.org/packages/1f/d9/74017c4eec7a28892d8d6e31ae9de3baef71f5a5286e74e6b7aad7f8c837/pandas-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:094e271a15b579650ebf4c5155c05dcd2a14fd4fdd72cf4854b2f7ad31ea30be", size = 10976199, upload-time = "2025-06-05T03:26:59.594Z" }, +] + +[[package]] +name = "panel" +version = "1.7.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bleach", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "bokeh", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "linkify-it-py", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "markdown", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "markdown-it-py", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "mdit-py-plugins", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and 
extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pandas", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "param", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyviz-comms", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "requests", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tqdm", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/c9/3d09242515d50b40738b3003561dfb687619407d2f4c3c6dec6502e74be6/panel-1.7.5.tar.gz", hash = "sha256:8bf5041174593fdb0e8c46bef3ade334ae7e97ef64f52c8955a1d7c62b5db18d", size = 31523412, upload-time = "2025-07-22T12:53:17.738Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/f0/12/2d9700ddd550962e099d15ad94e6f7249bdf4402445b0004bcea49a00474/panel-1.7.5-py3-none-any.whl", hash = "sha256:1c3b4a335d56d5aa0cf5d6e1c3684a297e24a62cf99345c5e9eb8552837b97c3", size = 29522301, upload-time = "2025-07-22T12:53:13.149Z" }, +] + +[[package]] +name = "param" +version = "2.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/97/9c/69a576ffb9da36072ffc1f7ef7afaad88366d30dcb327caeb92c8b6cc4ee/param-2.2.1.tar.gz", hash = "sha256:ba1f7cec6455ea8ad96f641f4082759bf1057dcbe629aa79d956b25973252422", size = 176980, upload-time = "2025-06-11T15:10:26.683Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/57/2b46b199482bbaaade2f978164577cf7c2fdc2782a7caf29fabd5265a84f/param-2.2.1-py3-none-any.whl", hash = "sha256:e3a4ca7f3d7610615129a55dbde2e90eb67d11cef70936487b0a59717dba0bdc", size = 119047, upload-time = "2025-06-11T15:10:25.136Z" }, +] + +[[package]] +name = "parso" +version = "0.8.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/66/94/68e2e17afaa9169cf6412ab0f28623903be73d1b32e208d9e8e541bb086d/parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d", size = 400609, upload-time = "2024-04-05T09:43:55.897Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c6/ac/dac4a63f978e4dcb3c6d3a78c4d8e0192a113d288502a1216950c41b1027/parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18", size = 103650, upload-time = "2024-04-05T09:43:53.299Z" }, +] + +[[package]] +name = "partd" +version = "1.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "locket", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "toolz", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b2/3a/3f06f34820a31257ddcabdfafc2672c5816be79c7e353b02c1f318daa7d4/partd-1.4.2.tar.gz", hash = "sha256:d022c33afbdc8405c226621b015e8067888173d85f7f5ecebb3cafed9a20f02c", size = 21029, upload-time = "2024-05-06T19:51:41.945Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/e7/40fb618334dcdf7c5a316c0e7343c5cd82d3d866edc100d98e29bc945ecd/partd-1.4.2-py3-none-any.whl", hash = "sha256:978e4ac767ec4ba5b86c6eaa52e5a2a3bc748a2ca839e8cc798f1cc6ce6efb0f", size = 18905, upload-time = "2024-05-06T19:51:39.271Z" }, +] + +[[package]] +name = "pathspec" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ca/bc/f35b8446f4531a7cb215605d100cd88b7ac6f44ab3fc94870c120ab3adbf/pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712", size = 51043, 
upload-time = "2023-12-10T22:30:45Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cc/20/ff623b09d963f88bfde16306a54e12ee5ea43e9b597108672ff3a408aad6/pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", size = 31191, upload-time = "2023-12-10T22:30:43.14Z" }, +] + +[[package]] +name = "patsy" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/81/74f6a65b848ffd16c18f920620ce999fe45fe27f01ab3911260ce4ed85e4/patsy-1.0.1.tar.gz", hash = "sha256:e786a9391eec818c054e359b737bbce692f051aee4c661f4141cc88fb459c0c4", size = 396010, upload-time = "2024-11-12T14:10:54.642Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/2b/b50d3d08ea0fc419c183a84210571eba005328efa62b6b98bc28e9ead32a/patsy-1.0.1-py2.py3-none-any.whl", hash = "sha256:751fb38f9e97e62312e921a1954b81e1bb2bcda4f5eeabaf94db251ee791509c", size = 232923, upload-time = "2024-11-12T14:10:52.85Z" }, +] + +[[package]] +name = "pdbpp" +version = "0.11.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "fancycompleter", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pygments", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c6/4c/118ef9534ac0632859b48c305d8c5dc9d6f963564fdfa66bc785c560247c/pdbpp-0.11.7.tar.gz", hash = "sha256:cb6604ac31a35ed0f2a29650a8c022b26284620be3e01cfd41b683b91da1ff14", size = 76026, upload-time = "2025-07-18T09:36:02.781Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/e9/704bbc08aace64fee536e4c2c20f63f64f6fdbad72938c5ed46c9723a9f1/pdbpp-0.11.7-py3-none-any.whl", hash = "sha256:51916b63693898cf4881b36b4501c83947758d73f582f1f84893662b163bdb75", size = 30545, upload-time = "2025-07-18T09:36:01.478Z" }, +] + +[[package]] +name = "pdbufr" +version = "0.13.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name 
= "eccodes", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pandas", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pint", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/6b/ef6449262564f1846b64ae1c74891281c11fc9520bc4bfcef63ddd3d9111/pdbufr-0.13.0.tar.gz", hash = "sha256:7db6f5e51bffb32aa738012d1a50124970a67965a3baa92481e203da03f1859a", size = 8757098, upload-time = "2025-05-28T15:13:00.103Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/1d/59f7f3786b7cecb81811d050c9bdaa2731aaf1d28e8393caf995f2512f11/pdbufr-0.13.0-py3-none-any.whl", hash = "sha256:06400369fc4925c114e354e6b91771e5908e9cdc6147b2b44aeb785b79bf95e8", size = 51172, upload-time = "2025-05-28T15:12:57.248Z" }, +] + +[[package]] +name = "pexpect" +version = "4.9.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "ptyprocess", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/42/92/cc564bf6381ff43ce1f4d06852fc19a2f11d180f23dc32d9588bee2f149d/pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f", size = 166450, upload-time = "2023-11-25T09:07:26.339Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9e/c3/059298687310d527a58bb01f3b1965787ee3b40dce76752eda8b44e9a2c5/pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523", size = 63772, upload-time = "2023-11-25T06:56:14.81Z" }, +] + +[[package]] +name = "pillow" +version = "11.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/cb/bb5c01fcd2a69335b86c22142b2bccfc3464087efb7fd382eee5ffc7fdf7/pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6", size = 47026707, upload-time = "2025-04-12T17:50:03.289Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/40/052610b15a1b8961f52537cc8326ca6a881408bc2bdad0d852edeb6ed33b/pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f", size = 3190185, upload-time = "2025-04-12T17:48:00.417Z" }, + { url = "https://files.pythonhosted.org/packages/e5/7e/b86dbd35a5f938632093dc40d1682874c33dcfe832558fc80ca56bfcb774/pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b", size = 3030306, upload-time = "2025-04-12T17:48:02.391Z" }, + { url = "https://files.pythonhosted.org/packages/a4/5c/467a161f9ed53e5eab51a42923c33051bf8d1a2af4626ac04f5166e58e0c/pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d", size = 4416121, upload-time = "2025-04-12T17:48:04.554Z" }, + { url = "https://files.pythonhosted.org/packages/62/73/972b7742e38ae0e2ac76ab137ca6005dcf877480da0d9d61d93b613065b4/pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4", size = 4501707, upload-time = "2025-04-12T17:48:06.831Z" }, + { url = "https://files.pythonhosted.org/packages/e4/3a/427e4cb0b9e177efbc1a84798ed20498c4f233abde003c06d2650a6d60cb/pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d", size = 4522921, upload-time = "2025-04-12T17:48:09.229Z" }, + { url = "https://files.pythonhosted.org/packages/fe/7c/d8b1330458e4d2f3f45d9508796d7caf0c0d3764c00c823d10f6f1a3b76d/pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4", size = 4612523, upload-time = "2025-04-12T17:48:11.631Z" }, + { url = "https://files.pythonhosted.org/packages/b3/2f/65738384e0b1acf451de5a573d8153fe84103772d139e1e0bdf1596be2ea/pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443", size = 4587836, upload-time = "2025-04-12T17:48:13.592Z" }, + { url = "https://files.pythonhosted.org/packages/6a/c5/e795c9f2ddf3debb2dedd0df889f2fe4b053308bb59a3cc02a0cd144d641/pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c", size = 4669390, upload-time = "2025-04-12T17:48:15.938Z" }, + { url = "https://files.pythonhosted.org/packages/96/ae/ca0099a3995976a9fce2f423166f7bff9b12244afdc7520f6ed38911539a/pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3", size = 2332309, upload-time = "2025-04-12T17:48:17.885Z" }, + { url = "https://files.pythonhosted.org/packages/7c/18/24bff2ad716257fc03da964c5e8f05d9790a779a8895d6566e493ccf0189/pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941", size = 2676768, upload-time = "2025-04-12T17:48:19.655Z" }, + { url = "https://files.pythonhosted.org/packages/da/bb/e8d656c9543276517ee40184aaa39dcb41e683bca121022f9323ae11b39d/pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb", size = 2415087, upload-time = "2025-04-12T17:48:21.991Z" }, +] + +[[package]] +name = "pint" +version = "0.24.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "flexcache", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 
'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "flexparser", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "platformdirs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/20/bb/52b15ddf7b7706ed591134a895dbf6e41c8348171fb635e655e0a4bbb0ea/pint-0.24.4.tar.gz", hash = "sha256:35275439b574837a6cd3020a5a4a73645eb125ce4152a73a2f126bf164b91b80", size = 342225, upload-time = "2024-11-07T16:29:46.061Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/16/bd2f5904557265882108dc2e04f18abc05ab0c2b7082ae9430091daf1d5c/Pint-0.24.4-py3-none-any.whl", hash = "sha256:aa54926c8772159fcf65f82cc0d34de6768c151b32ad1deb0331291c38fe7659", size = 302029, upload-time = "2024-11-07T16:29:43.976Z" }, +] + +[[package]] +name = "platformdirs" +version = "4.3.8" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/fe/8b/3c73abc9c759ecd3f1f7ceff6685840859e8070c4d947c93fae71f6a0bf2/platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc", size = 21362, upload-time = "2025-05-07T22:47:42.121Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fe/39/979e8e21520d4e47a0bbe349e2713c0aac6f3d853d0e5b34d76206c439aa/platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4", size = 18567, upload-time = "2025-05-07T22:47:40.376Z" }, +] + +[[package]] +name = "plotly" +version = "6.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "narwhals", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 
'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6e/5c/0efc297df362b88b74957a230af61cd6929f531f72f48063e8408702ffba/plotly-6.2.0.tar.gz", hash = "sha256:9dfa23c328000f16c928beb68927444c1ab9eae837d1fe648dbcda5360c7953d", size = 6801941, upload-time = "2025-06-26T16:20:45.765Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/20/f2b7ac96a91cc5f70d81320adad24cc41bf52013508d649b1481db225780/plotly-6.2.0-py3-none-any.whl", hash = "sha256:32c444d4c940887219cb80738317040363deefdfee4f354498cc0b6dab8978bd", size = 9635469, upload-time = "2025-06-26T16:20:40.76Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "polars" +version = "1.25.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/57/56/d8a13c3a1990c92cc2c4f1887e97ea15aabf5685b1e826f875ca3e4e6c9e/polars-1.25.2.tar.gz", hash = "sha256:c6bd9b1b17c86e49bcf8aac44d2238b77e414d7df890afc3924812a5c989a4fe", size = 4501858, upload-time = "2025-03-15T16:55:05.901Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/ec/61ae653b7848769baa5c5aaa00f3b3eaedaec56c3f1203a90dafe893a368/polars-1.25.2-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:59f2a34520ea4307a22e18b832310f8045a8a348606ca99ae785499b31eb4170", size = 34539929, upload-time = "2025-03-15T16:53:55.931Z" }, + { url = "https://files.pythonhosted.org/packages/58/80/54f8cbb048558114ca519d7c40a994130c5a537246923ecce47cf269eaa6/polars-1.25.2-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:e9fe45bdc2327c2e2b64e8849a992b6d3bd4a7e7848b8a7a3a439cca9674dc87", size = 31326982, upload-time = "2025-03-15T16:54:01.056Z" }, + { url = "https://files.pythonhosted.org/packages/cd/92/db411b7c83f694dca1b8348fa57a120c27c67cf622b85fa88c7ecf463adb/polars-1.25.2-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f7fcbb4f476784384ccda48757fca4e8c2e2c5a0a3aef3717aaf56aee4e30e09", size = 35121263, upload-time = "2025-03-15T16:54:04.932Z" }, + { url = "https://files.pythonhosted.org/packages/9f/a5/5ff200ce3bc643d5f12d91eddb9720fa083267c45fe395bcf0046e97cc2d/polars-1.25.2-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:9dd91885c9ee5ffad8725c8591f73fb7bd2632c740277ee641f0453176b3d4b8", size = 32254697, upload-time = "2025-03-15T16:54:09.553Z" }, + { url = "https://files.pythonhosted.org/packages/70/d5/7a5458d05d5a0af816b1c7034aa1d026b7b8176a8de41e96dac70fcf29e2/polars-1.25.2-cp39-abi3-win_amd64.whl", hash = "sha256:a547796643b9a56cb2959be87d7cb87ff80a5c8ae9367f32fe1ad717039e9afc", size = 35318381, upload-time = "2025-03-15T16:54:14.088Z" }, + { url = 
"https://files.pythonhosted.org/packages/24/df/60d35c4ae8ec357a5fb9914eb253bd1bad9e0f5332eda2bd2c6371dd3668/polars-1.25.2-cp39-abi3-win_arm64.whl", hash = "sha256:a2488e9d4b67bf47b18088f7264999180559e6ec2637ed11f9d0d4f98a74a37c", size = 31619833, upload-time = "2025-03-15T16:54:17.974Z" }, +] + +[[package]] +name = "prompt-toolkit" +version = "3.0.51" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wcwidth", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bb/6e/9d084c929dfe9e3bfe0c6a47e31f78a25c54627d64a66e884a8bf5474f1c/prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed", size = 428940, upload-time = "2025-04-15T09:18:47.731Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/4f/5249960887b1fbe561d9ff265496d170b55a735b76724f10ef19f9e40716/prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07", size = 387810, upload-time = "2025-04-15T09:18:44.753Z" }, +] + +[[package]] +name = "properscoring" +version = "0.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "scipy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/38/ac/513d2c8653ab6bc66c4502372e6e4e20ce6a136cde4c1ba9908ec36e34c1/properscoring-0.1.tar.gz", hash = "sha256:b0cc4963cc218b728d6c5f77b3259c8f835ae00e32e82678cdf6936049b93961", size = 17848, upload-time = "2015-11-12T19:54:29.615Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0a/ff/51706ba1a029d0f2df0322543793d3bf1383de9dc567d23886144cb21bef/properscoring-0.1-py2.py3-none-any.whl", hash = "sha256:f84d5b06c13549d0171ce52ad7b45c6f5726ac44b733d24af5c60654cbb821dc", size = 23427, upload-time = "2015-11-12T19:54:24.578Z" }, +] + +[[package]] +name = "protobuf" +version = "6.31.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/f3/b9655a711b32c19720253f6f06326faf90580834e2e83f840472d752bc8b/protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a", size = 441797, upload-time = "2025-05-28T19:25:54.947Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/f3/6f/6ab8e4bf962fd5570d3deaa2d5c38f0a363f57b4501047b5ebeb83ab1125/protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9", size = 423603, upload-time = "2025-05-28T19:25:41.198Z" }, + { url = "https://files.pythonhosted.org/packages/44/3a/b15c4347dd4bf3a1b0ee882f384623e2063bb5cf9fa9d57990a4f7df2fb6/protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447", size = 435283, upload-time = "2025-05-28T19:25:44.275Z" }, + { url = "https://files.pythonhosted.org/packages/6a/c9/b9689a2a250264a84e66c46d8862ba788ee7a641cdca39bccf64f59284b7/protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402", size = 425604, upload-time = "2025-05-28T19:25:45.702Z" }, + { url = "https://files.pythonhosted.org/packages/76/a1/7a5a94032c83375e4fe7e7f56e3976ea6ac90c5e85fac8576409e25c39c3/protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39", size = 322115, upload-time = "2025-05-28T19:25:47.128Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b1/b59d405d64d31999244643d88c45c8241c58f17cc887e73bcb90602327f8/protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6", size = 321070, upload-time = "2025-05-28T19:25:50.036Z" }, + { url = "https://files.pythonhosted.org/packages/f7/af/ab3c51ab7507a7325e98ffe691d9495ee3d3aa5f589afad65ec920d39821/protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e", size = 168724, upload-time = "2025-05-28T19:25:53.926Z" }, +] + +[[package]] +name = "psutil" +version = "7.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2a/80/336820c1ad9286a4ded7e845b2eccfcb27851ab8ac6abece774a6ff4d3de/psutil-7.0.0.tar.gz", hash = "sha256:7be9c3eba38beccb6495ea33afd982a44074b78f28c434a1f51cc07fd315c456", size = 497003, upload-time = "2025-02-13T21:54:07.946Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/e6/2d26234410f8b8abdbf891c9da62bee396583f713fb9f3325a4760875d22/psutil-7.0.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:101d71dc322e3cffd7cea0650b09b3d08b8e7c4109dd6809fe452dfd00e58b25", size = 238051, upload-time = "2025-02-13T21:54:12.36Z" }, + { url = "https://files.pythonhosted.org/packages/04/8b/30f930733afe425e3cbfc0e1468a30a18942350c1a8816acfade80c005c4/psutil-7.0.0-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:39db632f6bb862eeccf56660871433e111b6ea58f2caea825571951d4b6aa3da", size = 239535, upload-time = "2025-02-13T21:54:16.07Z" }, + { url = "https://files.pythonhosted.org/packages/2a/ed/d362e84620dd22876b55389248e522338ed1bf134a5edd3b8231d7207f6d/psutil-7.0.0-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1fcee592b4c6f146991ca55919ea3d1f8926497a713ed7faaf8225e174581e91", size = 275004, upload-time = "2025-02-13T21:54:18.662Z" }, + { url = "https://files.pythonhosted.org/packages/bf/b9/b0eb3f3cbcb734d930fdf839431606844a825b23eaf9a6ab371edac8162c/psutil-7.0.0-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4b1388a4f6875d7e2aff5c4ca1cc16c545ed41dd8bb596cefea80111db353a34", size = 277986, upload-time = 
"2025-02-13T21:54:21.811Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a2/709e0fe2f093556c17fbafda93ac032257242cabcc7ff3369e2cb76a97aa/psutil-7.0.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f098451abc2828f7dc6b58d44b532b22f2088f4999a937557b603ce72b1993", size = 279544, upload-time = "2025-02-13T21:54:24.68Z" }, + { url = "https://files.pythonhosted.org/packages/50/e6/eecf58810b9d12e6427369784efe814a1eec0f492084ce8eb8f4d89d6d61/psutil-7.0.0-cp37-abi3-win32.whl", hash = "sha256:ba3fcef7523064a6c9da440fc4d6bd07da93ac726b5733c29027d7dc95b39d99", size = 241053, upload-time = "2025-02-13T21:54:34.31Z" }, + { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" }, +] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/20/e5/16ff212c1e452235a90aeb09066144d0c5a6a8c0834397e03f5224495c4e/ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220", size = 70762, upload-time = "2020-12-28T15:15:30.155Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/a6/858897256d0deac81a172289110f31629fc4cee19b6f01283303e18c8db3/ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35", size = 13993, upload-time = "2020-12-28T15:15:28.35Z" }, +] + +[[package]] +name = "pure-eval" +version = "0.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/cd/05/0a34433a064256a578f1783a10da6df098ceaa4a57bbeaa96a6c0352786b/pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42", size = 19752, upload-time = "2024-07-21T12:58:21.801Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' 
and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = "pydantic" +version = "2.11.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pydantic-core", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-inspection", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" }, + { url = "https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" }, + { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" }, + { url = 
"https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" }, + { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" }, + { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = "sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" }, + { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" }, + { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" }, +] + +[[package]] +name = "pyerfa" +version = "2.0.1.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/39/63cc8291b0cf324ae710df41527faf7d331bce573899199d926b3e492260/pyerfa-2.0.1.5.tar.gz", hash = "sha256:17d6b24fe4846c65d5e7d8c362dcb08199dc63b30a236aedd73875cc83e1f6c0", size = 818430, upload-time = "2024-11-11T15:22:30.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/d9/3448a57cb5bd19950de6d6ab08bd8fbb3df60baa71726de91d73d76c481b/pyerfa-2.0.1.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b282d7c60c4c47cf629c484c17ac504fcb04abd7b3f4dfcf53ee042afc3a5944", size = 341818, upload-time = "2024-11-11T15:22:16.467Z" }, + { url = "https://files.pythonhosted.org/packages/11/4a/31a363370478b63c6289a34743f2ba2d3ae1bd8223e004d18ab28fb92385/pyerfa-2.0.1.5-cp39-abi3-macosx_11_0_arm64.whl", hash = 
"sha256:be1aeb70390dd03a34faf96749d5cabc58437410b4aab7213c512323932427df", size = 329370, upload-time = "2024-11-11T15:22:17.829Z" }, + { url = "https://files.pythonhosted.org/packages/cb/96/b6210fc624123c8ae13e1eecb68fb75e3f3adff216d95eee1c7b05843e3e/pyerfa-2.0.1.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0603e8e1b839327d586c8a627cdc634b795e18b007d84f0cda5500a0908254e", size = 692794, upload-time = "2024-11-11T15:22:19.429Z" }, + { url = "https://files.pythonhosted.org/packages/e5/e0/050018d855d26d3c0b4a7d1b2ed692be758ce276d8289e2a2b44ba1014a5/pyerfa-2.0.1.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e43c7194e3242083f2350b46c09fd4bf8ba1bcc0ebd1460b98fc47fe2389906", size = 738711, upload-time = "2024-11-11T15:22:20.661Z" }, + { url = "https://files.pythonhosted.org/packages/b9/f5/ff91ee77308793ae32fa1e1de95e9edd4551456dd888b4e87c5938657ca5/pyerfa-2.0.1.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:07b80cd70701f5d066b1ac8cce406682cfcd667a1186ec7d7ade597239a6021d", size = 722966, upload-time = "2024-11-11T15:22:21.905Z" }, + { url = "https://files.pythonhosted.org/packages/2c/56/b22b35c8551d2228ff8d445e63787112927ca13f6dc9e2c04f69d742c95b/pyerfa-2.0.1.5-cp39-abi3-win32.whl", hash = "sha256:d30b9b0df588ed5467e529d851ea324a67239096dd44703125072fd11b351ea2", size = 339955, upload-time = "2024-11-11T15:22:23.087Z" }, + { url = "https://files.pythonhosted.org/packages/b4/11/97233cf23ad5411ac6f13b1d6ee3888f90ace4f974d9bf9db887aa428912/pyerfa-2.0.1.5-cp39-abi3-win_amd64.whl", hash = "sha256:66292d437dcf75925b694977aa06eb697126e7b86553e620371ed3e48b5e0ad0", size = 349410, upload-time = "2024-11-11T15:22:24.817Z" }, +] + +[[package]] +name = "pygments" +version = "2.19.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" }, +] + +[[package]] +name = "pynvml" +version = "12.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "nvidia-ml-py", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/26/6f/6b5880ed0239e85b9a39aed103b65b2ef81425beef9f45e5c035bf008330/pynvml-12.0.0.tar.gz", hash = "sha256:299ce2451a6a17e6822d6faee750103e25b415f06f59abb8db65d30f794166f5", size = 33636, upload-time = "2024-12-02T15:04:36.631Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ed/df/f7cf07a65a96dd11d71f346f9c2863accdd4784da83af7181b067d556cbc/pynvml-12.0.0-py3-none-any.whl", hash = "sha256:fdff84b62a27dbe98e08e1a647eb77342bef1aebe0878bcd15e99a83fcbecb9e", size = 26560, upload-time = "2024-12-02T15:04:35.047Z" }, +] + 
+[[package]] +name = "pyparsing" +version = "3.2.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/22/f1129e69d94ffff626bdb5c835506b3a5b4f3d070f17ea295e12c2c6f60f/pyparsing-3.2.3.tar.gz", hash = "sha256:b9c13f1ab8b3b542f72e28f634bad4de758ab3ce4546e4301970ad6fa77c38be", size = 1088608, upload-time = "2025-03-25T05:01:28.114Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/e7/df2285f3d08fee213f2d041540fa4fc9ca6c2d44cf36d3a035bf2a8d2bcc/pyparsing-3.2.3-py3-none-any.whl", hash = "sha256:a749938e02d6fd0b59b356ca504a24982314bb090c383e3cf201c95ef7e2bfcf", size = 111120, upload-time = "2025-03-25T05:01:24.908Z" }, +] + +[[package]] +name = "pyproj" +version = "3.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/67/10/a8480ea27ea4bbe896c168808854d00f2a9b49f95c0319ddcbba693c8a90/pyproj-3.7.1.tar.gz", hash = "sha256:60d72facd7b6b79853f19744779abcd3f804c4e0d4fa8815469db20c9f640a47", size = 226339, upload-time = "2025-02-16T04:28:46.621Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e6/c9/876d4345b8d17f37ac59ebd39f8fa52fc6a6a9891a420f72d050edb6b899/pyproj-3.7.1-cp312-cp312-macosx_13_0_x86_64.whl", hash = "sha256:2781029d90df7f8d431e29562a3f2d8eafdf233c4010d6fc0381858dc7373217", size = 6264087, upload-time = "2025-02-16T04:28:09.036Z" }, + { url = "https://files.pythonhosted.org/packages/ff/e6/5f8691f8c90e7f402cc80a6276eb19d2ec1faa150d5ae2dd9c7b0a254da8/pyproj-3.7.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:d61bf8ab04c73c1da08eedaf21a103b72fa5b0a9b854762905f65ff8b375d394", size = 4669628, upload-time = "2025-02-16T04:28:10.944Z" }, + { url = "https://files.pythonhosted.org/packages/42/ec/16475bbb79c1c68845c0a0d9c60c4fb31e61b8a2a20bc18b1a81e81c7f68/pyproj-3.7.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:04abc517a8555d1b05fcee768db3280143fe42ec39fdd926a2feef31631a1f2f", size = 9721415, upload-time = "2025-02-16T04:28:13.342Z" }, + { url = "https://files.pythonhosted.org/packages/b3/a3/448f05b15e318bd6bea9a32cfaf11e886c4ae61fa3eee6e09ed5c3b74bb2/pyproj-3.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084c0a475688f934d386c2ab3b6ce03398a473cd48adfda70d9ab8f87f2394a0", size = 9556447, upload-time = "2025-02-16T04:28:15.818Z" }, + { url = "https://files.pythonhosted.org/packages/6a/ae/bd15fe8d8bd914ead6d60bca7f895a4e6f8ef7e3928295134ff9a7dad14c/pyproj-3.7.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a20727a23b1e49c7dc7fe3c3df8e56a8a7acdade80ac2f5cca29d7ca5564c145", size = 10758317, upload-time = "2025-02-16T04:28:18.338Z" }, + { url = "https://files.pythonhosted.org/packages/9d/d9/5ccefb8bca925f44256b188a91c31238cae29ab6ee7f53661ecc04616146/pyproj-3.7.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:bf84d766646f1ebd706d883755df4370aaf02b48187cedaa7e4239f16bc8213d", size = 10771259, upload-time = "2025-02-16T04:28:20.822Z" }, + { url = 
"https://files.pythonhosted.org/packages/2a/7d/31dedff9c35fa703162f922eeb0baa6c44a3288469a5fd88d209e2892f9e/pyproj-3.7.1-cp312-cp312-win32.whl", hash = "sha256:5f0da2711364d7cb9f115b52289d4a9b61e8bca0da57f44a3a9d6fc9bdeb7274", size = 5859914, upload-time = "2025-02-16T04:28:23.303Z" }, + { url = "https://files.pythonhosted.org/packages/3e/47/c6ab03d6564a7c937590cff81a2742b5990f096cce7c1a622d325be340ee/pyproj-3.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:aee664a9d806612af30a19dba49e55a7a78ebfec3e9d198f6a6176e1d140ec98", size = 6273196, upload-time = "2025-02-16T04:28:25.227Z" }, +] + +[[package]] +name = "pyrefly" +version = "0.36.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/47/67/c5983b8cd002132b9d0e4ecbe096c04bbd86bd6898ee1f3eb63413f0b07e/pyrefly-0.36.0.tar.gz", hash = "sha256:f0fbadae9e6fadbf078eeafaa1c415ec1ede863a621132ecaad45ed316a944a8", size = 1670536, upload-time = "2025-10-06T17:50:28.856Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/09/a5ac35332359f1882e83062660db0361034352353a5aad49668148deec4c/pyrefly-0.36.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:614d6f6597024f2cbc2bd6c26b9123f6020bb22f0f905b9698f0786131e1afd4", size = 6803144, upload-time = "2025-10-06T17:50:13.38Z" }, + { url = "https://files.pythonhosted.org/packages/4c/ca/1cefd294d57977e1e2711da38dbd2f7636a454adaa8a21227db97f8dc83b/pyrefly-0.36.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:bdd5ce618ddea4d47982a2223b1f67196fa774cd2e223bfb85b202ad15144afb", size = 6350887, upload-time = "2025-10-06T17:50:15.75Z" }, + { url = "https://files.pythonhosted.org/packages/9e/85/2abcf08a8e663a231394b3ce9ee31257521dde0b53289bf3553ccf960320/pyrefly-0.36.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cabdf10811c4af9d7960a7450a2d59c7e5d0ae1a4c19ffb1bbc7d81c00f4862", size = 6593770, upload-time = "2025-10-06T17:50:17.74Z" }, + { url = "https://files.pythonhosted.org/packages/6e/ca/1b2832a28a64e5145a070143dfaeaf4bb9083fe35b7bf179e0c84a8aa7e9/pyrefly-0.36.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:222d49512d6c832a2cf661847d8e709a9b00a570f68ece2173bd840128385296", size = 7424514, upload-time = "2025-10-06T17:50:19.744Z" }, + { url = "https://files.pythonhosted.org/packages/73/23/fbcf6e094df39902cbc3733e8edf8e3ac2bb4875b48050e2f514622af54b/pyrefly-0.36.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937f79660786dc670bccba55279db347c1f3a51869af50e5602a84f8eeac4688", size = 7079712, upload-time = "2025-10-06T17:50:21.908Z" }, + { url = "https://files.pythonhosted.org/packages/e4/90/62c11f6324dd6d9990e05067758b6380cee7abf4589be8c51bc1786fa504/pyrefly-0.36.0-py3-none-win32.whl", hash = "sha256:f8683df8e3635de882163fdb7c74c643a5f2387a8f1c9f40ef36aba7722d697c", size = 6607047, upload-time = "2025-10-06T17:50:23.923Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a3/44c7764dfab004fc4221ed326b9032ac446f26ce1231169d155007e02697/pyrefly-0.36.0-py3-none-win_amd64.whl", hash = "sha256:3327948305b37efcf7e30db74fd29c9b7e6e6d5d2e16effdb2508d611ff69fca", size = 7034208, upload-time = "2025-10-06T17:50:25.697Z" }, + { url = "https://files.pythonhosted.org/packages/e8/4b/e98f462612e3a335fb7cefb35d581d7fdd8ddce92f050a78b721b5855a19/pyrefly-0.36.0-py3-none-win_arm64.whl", hash = "sha256:e2fcb3a1f27418f71585e2300d8addc1d63aeb77f022cac1edae489f19da7a4b", size = 6626879, upload-time = "2025-10-06T17:50:27.312Z" }, +] + +[[package]] +name = "pyrepl" +version = 
"0.11.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/08/4f/7088417e5465c53a30b918d30542aad89352ea0d635a5d077717c69a7d2b/pyrepl-0.11.4.tar.gz", hash = "sha256:efe988b4a6e5eed587e9769dc2269aeec2b6feec2f5d77995ee85b9ad7cf7063", size = 51089, upload-time = "2025-07-17T22:56:25.42Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bd/a5/ce97a778f096aaa27cfcb7ad09f1198cf73277dcab6c68a4b8f332d91e48/pyrepl-0.11.4-py3-none-any.whl", hash = "sha256:ac30d6340267a21c39e1b1934f92bca6b8735017d14b17e40f903b2d1563541d", size = 55596, upload-time = "2025-07-17T22:56:24.537Z" }, +] + +[[package]] +name = "pyshp" +version = "2.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/63/9f/0dd21250c60375a532c35e89fad8d5e8a3f1a2e3f7c389ccc5a60b05263e/pyshp-2.3.1.tar.gz", hash = "sha256:4caec82fd8dd096feba8217858068bacb2a3b5950f43c048c6dc32a3489d5af1", size = 1731544, upload-time = "2022-07-27T19:51:28.409Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/98/2f/68116db5b36b895c0450e3072b8cb6c2fac0359279b182ea97014d3c8ac0/pyshp-2.3.1-py2.py3-none-any.whl", hash = "sha256:67024c0ccdc352ba5db777c4e968483782dfa78f8e200672a90d2d30fd8b7b49", size = 46537, upload-time = "2022-07-27T19:51:26.34Z" }, +] + +[[package]] +name = "pytest" +version = "8.3.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "iniconfig", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pluggy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891, upload-time = "2025-03-02T12:54:54.503Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" }, +] + +[[package]] +name = "pytest-mock" +version = "3.14.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 
'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/28/67172c96ba684058a4d24ffe144d64783d2a270d0af0d9e792737bddc75c/pytest_mock-3.14.1.tar.gz", hash = "sha256:159e9edac4c451ce77a5cdb9fc5d1100708d2dd4ba3c3df572f14097351af80e", size = 33241, upload-time = "2025-05-26T13:58:45.167Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/05/77b60e520511c53d1c1ca75f1930c7dd8e971d0c4379b7f4b3f9644685ba/pytest_mock-3.14.1-py3-none-any.whl", hash = "sha256:178aefcd11307d874b4cd3100344e7e2d888d9791a6a1d9bfe90fbc1b74fd1d0", size = 9923, upload-time = "2025-05-26T13:58:43.487Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "python-dotenv" +version = "1.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f6/b0/4bc07ccd3572a2f9df7e6782f52b0c6c90dcbb803ac4a167702d7d0dfe1e/python_dotenv-1.1.1.tar.gz", hash = "sha256:a8a6399716257f45be6a007360200409fce5cda2661e3dec71d23dc15f6189ab", size = 41978, upload-time = "2025-06-24T04:21:07.341Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5f/ed/539768cf28c661b5b068d66d96a2f155c4971a5d55684a514c1a0e0dec2f/python_dotenv-1.1.1-py3-none-any.whl", hash = "sha256:31f23644fe2602f88ff55e1f5c79ba497e01224ee7737937930c448e4d0e24dc", size = 20556, upload-time = "2025-06-24T04:21:06.073Z" }, +] + +[[package]] +name = "pytz" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, +] + +[[package]] +name = "pyviz-comms" +version = "3.0.6" +source = 
{ registry = "https://pypi.org/simple" } +dependencies = [ + { name = "param", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/ee/2b5367b911bab506662abffe6f342101a9b3edacee91ff9afe62db5fe9a7/pyviz_comms-3.0.6.tar.gz", hash = "sha256:73d66b620390d97959b2c4d8a2c0778d41fe20581be4717f01e46b8fae8c5695", size = 197772, upload-time = "2025-06-20T16:50:30.97Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/5a/f8c0868199bbb231a02616286ce8a4ccb85f5387b9215510297dcfedd214/pyviz_comms-3.0.6-py3-none-any.whl", hash = "sha256:4eba6238cd4a7f4add2d11879ce55411785b7d38a7c5dba42c7a0826ca53e6c2", size = 84275, upload-time = "2025-06-20T16:50:28.826Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" }, + { url = "https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" }, + { url = 
"https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" }, +] + +[[package]] +name = "pyzmq" +version = "27.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "(implementation_name == 'pypy' and platform_machine == 'aarch64' and sys_platform == 'linux') or (implementation_name == 'pypy' and platform_machine == 'x86_64' and sys_platform == 'linux') or (implementation_name != 'pypy' and platform_machine == 'aarch64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (implementation_name != 'pypy' and platform_machine == 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/06/50a4e9648b3e8b992bef8eb632e457307553a89d294103213cfd47b3da69/pyzmq-27.0.0.tar.gz", hash = "sha256:b1f08eeb9ce1510e6939b6e5dcd46a17765e2333daae78ecf4606808442e52cf", size = 280478, upload-time = "2025-06-13T14:09:07.087Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/93/a7/9ad68f55b8834ede477842214feba6a4c786d936c022a67625497aacf61d/pyzmq-27.0.0-cp312-abi3-macosx_10_15_universal2.whl", hash = "sha256:cbabc59dcfaac66655c040dfcb8118f133fb5dde185e5fc152628354c1598e52", size = 1305438, upload-time = "2025-06-13T14:07:31.676Z" }, + { url = "https://files.pythonhosted.org/packages/ba/ee/26aa0f98665a22bc90ebe12dced1de5f3eaca05363b717f6fb229b3421b3/pyzmq-27.0.0-cp312-abi3-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:cb0ac5179cba4b2f94f1aa208fbb77b62c4c9bf24dd446278b8b602cf85fcda3", size = 895095, upload-time = "2025-06-13T14:07:33.104Z" }, + { url = "https://files.pythonhosted.org/packages/cf/85/c57e7ab216ecd8aa4cc7e3b83b06cc4e9cf45c87b0afc095f10cd5ce87c1/pyzmq-27.0.0-cp312-abi3-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:53a48f0228eab6cbf69fde3aa3c03cbe04e50e623ef92ae395fce47ef8a76152", size = 651826, upload-time = "2025-06-13T14:07:34.831Z" }, + { url = "https://files.pythonhosted.org/packages/69/9a/9ea7e230feda9400fb0ae0d61d7d6ddda635e718d941c44eeab22a179d34/pyzmq-27.0.0-cp312-abi3-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:111db5f395e09f7e775f759d598f43cb815fc58e0147623c4816486e1a39dc22", size = 839750, upload-time = "2025-06-13T14:07:36.553Z" }, + { url = 
"https://files.pythonhosted.org/packages/08/66/4cebfbe71f3dfbd417011daca267539f62ed0fbc68105357b68bbb1a25b7/pyzmq-27.0.0-cp312-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:c8878011653dcdc27cc2c57e04ff96f0471e797f5c19ac3d7813a245bcb24371", size = 1641357, upload-time = "2025-06-13T14:07:38.21Z" }, + { url = "https://files.pythonhosted.org/packages/ac/f6/b0f62578c08d2471c791287149cb8c2aaea414ae98c6e995c7dbe008adfb/pyzmq-27.0.0-cp312-abi3-musllinux_1_2_i686.whl", hash = "sha256:c0ed2c1f335ba55b5fdc964622254917d6b782311c50e138863eda409fbb3b6d", size = 2020281, upload-time = "2025-06-13T14:07:39.599Z" }, + { url = "https://files.pythonhosted.org/packages/37/b9/4f670b15c7498495da9159edc374ec09c88a86d9cd5a47d892f69df23450/pyzmq-27.0.0-cp312-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e918d70862d4cfd4b1c187310015646a14e1f5917922ab45b29f28f345eeb6be", size = 1877110, upload-time = "2025-06-13T14:07:41.027Z" }, + { url = "https://files.pythonhosted.org/packages/66/31/9dee25c226295b740609f0d46db2fe972b23b6f5cf786360980524a3ba92/pyzmq-27.0.0-cp312-abi3-win32.whl", hash = "sha256:88b4e43cab04c3c0f0d55df3b1eef62df2b629a1a369b5289a58f6fa8b07c4f4", size = 559297, upload-time = "2025-06-13T14:07:42.533Z" }, + { url = "https://files.pythonhosted.org/packages/9b/12/52da5509800f7ff2d287b2f2b4e636e7ea0f001181cba6964ff6c1537778/pyzmq-27.0.0-cp312-abi3-win_amd64.whl", hash = "sha256:dce4199bf5f648a902ce37e7b3afa286f305cd2ef7a8b6ec907470ccb6c8b371", size = 619203, upload-time = "2025-06-13T14:07:43.843Z" }, + { url = "https://files.pythonhosted.org/packages/93/6d/7f2e53b19d1edb1eb4f09ec7c3a1f945ca0aac272099eab757d15699202b/pyzmq-27.0.0-cp312-abi3-win_arm64.whl", hash = "sha256:56e46bbb85d52c1072b3f809cc1ce77251d560bc036d3a312b96db1afe76db2e", size = 551927, upload-time = "2025-06-13T14:07:45.51Z" }, +] + +[[package]] +name = "referencing" +version = "0.36.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "rpds-py", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/c1/b1/3baf80dc6d2b7bc27a95a67752d0208e410351e3feb4eb78de5f77454d8d/referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0", size = 26775, upload-time = "2025-01-25T08:48:14.241Z" }, +] + +[[package]] +name = "requests" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "charset-normalizer", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "idna", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "urllib3", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "rpds-py" +version = "0.25.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/a6/60184b7fc00dd3ca80ac635dd5b8577d444c57e8e8742cecabfacb829921/rpds_py-0.25.1.tar.gz", hash = "sha256:8960b6dac09b62dac26e75d7e2c4a22efb835d827a7278c34f72b2b84fa160e3", size = 27304, upload-time = "2025-05-21T12:46:12.502Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/81/28ab0408391b1dc57393653b6a0cf2014cc282cc2909e4615e63e58262be/rpds_py-0.25.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b5ffe453cde61f73fea9430223c81d29e2fbf412a6073951102146c84e19e34c", size = 364647, upload-time = "2025-05-21T12:43:28.559Z" }, + { url = "https://files.pythonhosted.org/packages/2c/9a/7797f04cad0d5e56310e1238434f71fc6939d0bc517192a18bb99a72a95f/rpds_py-0.25.1-cp312-cp312-macosx_11_0_arm64.whl", hash 
= "sha256:115874ae5e2fdcfc16b2aedc95b5eef4aebe91b28e7e21951eda8a5dc0d3461b", size = 350454, upload-time = "2025-05-21T12:43:30.615Z" }, + { url = "https://files.pythonhosted.org/packages/69/3c/93d2ef941b04898011e5d6eaa56a1acf46a3b4c9f4b3ad1bbcbafa0bee1f/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a714bf6e5e81b0e570d01f56e0c89c6375101b8463999ead3a93a5d2a4af91fa", size = 389665, upload-time = "2025-05-21T12:43:32.629Z" }, + { url = "https://files.pythonhosted.org/packages/c1/57/ad0e31e928751dde8903a11102559628d24173428a0f85e25e187defb2c1/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:35634369325906bcd01577da4c19e3b9541a15e99f31e91a02d010816b49bfda", size = 403873, upload-time = "2025-05-21T12:43:34.576Z" }, + { url = "https://files.pythonhosted.org/packages/16/ad/c0c652fa9bba778b4f54980a02962748479dc09632e1fd34e5282cf2556c/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4cb2b3ddc16710548801c6fcc0cfcdeeff9dafbc983f77265877793f2660309", size = 525866, upload-time = "2025-05-21T12:43:36.123Z" }, + { url = "https://files.pythonhosted.org/packages/2a/39/3e1839bc527e6fcf48d5fec4770070f872cdee6c6fbc9b259932f4e88a38/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9ceca1cf097ed77e1a51f1dbc8d174d10cb5931c188a4505ff9f3e119dfe519b", size = 416886, upload-time = "2025-05-21T12:43:38.034Z" }, + { url = "https://files.pythonhosted.org/packages/7a/95/dd6b91cd4560da41df9d7030a038298a67d24f8ca38e150562644c829c48/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c2cd1a4b0c2b8c5e31ffff50d09f39906fe351389ba143c195566056c13a7ea", size = 390666, upload-time = "2025-05-21T12:43:40.065Z" }, + { url = "https://files.pythonhosted.org/packages/64/48/1be88a820e7494ce0a15c2d390ccb7c52212370badabf128e6a7bb4cb802/rpds_py-0.25.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1de336a4b164c9188cb23f3703adb74a7623ab32d20090d0e9bf499a2203ad65", size = 425109, upload-time = "2025-05-21T12:43:42.263Z" }, + { url = "https://files.pythonhosted.org/packages/cf/07/3e2a17927ef6d7720b9949ec1b37d1e963b829ad0387f7af18d923d5cfa5/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9fca84a15333e925dd59ce01da0ffe2ffe0d6e5d29a9eeba2148916d1824948c", size = 567244, upload-time = "2025-05-21T12:43:43.846Z" }, + { url = "https://files.pythonhosted.org/packages/d2/e5/76cf010998deccc4f95305d827847e2eae9c568099c06b405cf96384762b/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:88ec04afe0c59fa64e2f6ea0dd9657e04fc83e38de90f6de201954b4d4eb59bd", size = 596023, upload-time = "2025-05-21T12:43:45.932Z" }, + { url = "https://files.pythonhosted.org/packages/52/9a/df55efd84403736ba37a5a6377b70aad0fd1cb469a9109ee8a1e21299a1c/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8bd2f19e312ce3e1d2c635618e8a8d8132892bb746a7cf74780a489f0f6cdcb", size = 561634, upload-time = "2025-05-21T12:43:48.263Z" }, + { url = "https://files.pythonhosted.org/packages/ab/aa/dc3620dd8db84454aaf9374bd318f1aa02578bba5e567f5bf6b79492aca4/rpds_py-0.25.1-cp312-cp312-win32.whl", hash = "sha256:e5e2f7280d8d0d3ef06f3ec1b4fd598d386cc6f0721e54f09109a8132182fbfe", size = 222713, upload-time = "2025-05-21T12:43:49.897Z" }, + { url = "https://files.pythonhosted.org/packages/a3/7f/7cef485269a50ed5b4e9bae145f512d2a111ca638ae70cc101f661b4defd/rpds_py-0.25.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:db58483f71c5db67d643857404da360dce3573031586034b7d59f245144cc192", size = 235280, upload-time = "2025-05-21T12:43:51.893Z" }, + { url = "https://files.pythonhosted.org/packages/99/f2/c2d64f6564f32af913bf5f3f7ae41c7c263c5ae4c4e8f1a17af8af66cd46/rpds_py-0.25.1-cp312-cp312-win_arm64.whl", hash = "sha256:6d50841c425d16faf3206ddbba44c21aa3310a0cebc3c1cdfc3e3f4f9f6f5728", size = 225399, upload-time = "2025-05-21T12:43:53.351Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + +[[package]] +name = "ruff" +version = "0.9.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/39/8b/a86c300359861b186f18359adf4437ac8e4c52e42daa9eedc731ef9d5b53/ruff-0.9.7.tar.gz", hash = "sha256:643757633417907510157b206e490c3aa11cab0c087c912f60e07fbafa87a4c6", size = 3669813, upload-time = "2025-02-20T13:26:52.111Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/f3/3a1d22973291226df4b4e2ff70196b926b6f910c488479adb0eeb42a0d7f/ruff-0.9.7-py3-none-linux_armv6l.whl", hash = "sha256:99d50def47305fe6f233eb8dabfd60047578ca87c9dcb235c9723ab1175180f4", size = 11774588, upload-time = "2025-02-20T13:25:52.253Z" }, + { url = "https://files.pythonhosted.org/packages/8e/c9/b881f4157b9b884f2994fd08ee92ae3663fb24e34b0372ac3af999aa7fc6/ruff-0.9.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d59105ae9c44152c3d40a9c40d6331a7acd1cdf5ef404fbe31178a77b174ea66", size = 11746848, upload-time = "2025-02-20T13:25:57.279Z" }, + { url = "https://files.pythonhosted.org/packages/14/89/2f546c133f73886ed50a3d449e6bf4af27d92d2f960a43a93d89353f0945/ruff-0.9.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f313b5800483770bd540cddac7c90fc46f895f427b7820f18fe1822697f1fec9", size = 11177525, upload-time = "2025-02-20T13:26:00.007Z" }, + { url = "https://files.pythonhosted.org/packages/d7/93/6b98f2c12bf28ab9def59c50c9c49508519c5b5cfecca6de871cf01237f6/ruff-0.9.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042ae32b41343888f59c0a4148f103208bf6b21c90118d51dc93a68366f4e903", size = 11996580, upload-time = "2025-02-20T13:26:03.274Z" }, + { url = "https://files.pythonhosted.org/packages/8e/3f/b3fcaf4f6d875e679ac2b71a72f6691a8128ea3cb7be07cbb249f477c061/ruff-0.9.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:87862589373b33cc484b10831004e5e5ec47dc10d2b41ba770e837d4f429d721", size = 11525674, upload-time = "2025-02-20T13:26:06.073Z" }, + { url = 
"https://files.pythonhosted.org/packages/f0/48/33fbf18defb74d624535d5d22adcb09a64c9bbabfa755bc666189a6b2210/ruff-0.9.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a17e1e01bee0926d351a1ee9bc15c445beae888f90069a6192a07a84af544b6b", size = 12739151, upload-time = "2025-02-20T13:26:08.964Z" }, + { url = "https://files.pythonhosted.org/packages/63/b5/7e161080c5e19fa69495cbab7c00975ef8a90f3679caa6164921d7f52f4a/ruff-0.9.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7c1f880ac5b2cbebd58b8ebde57069a374865c73f3bf41f05fe7a179c1c8ef22", size = 13416128, upload-time = "2025-02-20T13:26:12.54Z" }, + { url = "https://files.pythonhosted.org/packages/4e/c8/b5e7d61fb1c1b26f271ac301ff6d9de5e4d9a9a63f67d732fa8f200f0c88/ruff-0.9.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e63fc20143c291cab2841dbb8260e96bafbe1ba13fd3d60d28be2c71e312da49", size = 12870858, upload-time = "2025-02-20T13:26:16.794Z" }, + { url = "https://files.pythonhosted.org/packages/da/cb/2a1a8e4e291a54d28259f8fc6a674cd5b8833e93852c7ef5de436d6ed729/ruff-0.9.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91ff963baed3e9a6a4eba2a02f4ca8eaa6eba1cc0521aec0987da8d62f53cbef", size = 14786046, upload-time = "2025-02-20T13:26:19.85Z" }, + { url = "https://files.pythonhosted.org/packages/ca/6c/c8f8a313be1943f333f376d79724260da5701426c0905762e3ddb389e3f4/ruff-0.9.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88362e3227c82f63eaebf0b2eff5b88990280fb1ecf7105523883ba8c3aaf6fb", size = 12550834, upload-time = "2025-02-20T13:26:23.082Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ad/f70cf5e8e7c52a25e166bdc84c082163c9c6f82a073f654c321b4dff9660/ruff-0.9.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0372c5a90349f00212270421fe91874b866fd3626eb3b397ede06cd385f6f7e0", size = 11961307, upload-time = "2025-02-20T13:26:26.738Z" }, + { url = "https://files.pythonhosted.org/packages/52/d5/4f303ea94a5f4f454daf4d02671b1fbfe2a318b5fcd009f957466f936c50/ruff-0.9.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d76b8ab60e99e6424cd9d3d923274a1324aefce04f8ea537136b8398bbae0a62", size = 11612039, upload-time = "2025-02-20T13:26:30.26Z" }, + { url = "https://files.pythonhosted.org/packages/eb/c8/bd12a23a75603c704ce86723be0648ba3d4ecc2af07eecd2e9fa112f7e19/ruff-0.9.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:0c439bdfc8983e1336577f00e09a4e7a78944fe01e4ea7fe616d00c3ec69a3d0", size = 12168177, upload-time = "2025-02-20T13:26:33.452Z" }, + { url = "https://files.pythonhosted.org/packages/cc/57/d648d4f73400fef047d62d464d1a14591f2e6b3d4a15e93e23a53c20705d/ruff-0.9.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:115d1f15e8fdd445a7b4dc9a30abae22de3f6bcabeb503964904471691ef7606", size = 12610122, upload-time = "2025-02-20T13:26:37.365Z" }, + { url = "https://files.pythonhosted.org/packages/49/79/acbc1edd03ac0e2a04ae2593555dbc9990b34090a9729a0c4c0cf20fb595/ruff-0.9.7-py3-none-win32.whl", hash = "sha256:e9ece95b7de5923cbf38893f066ed2872be2f2f477ba94f826c8defdd6ec6b7d", size = 9988751, upload-time = "2025-02-20T13:26:40.366Z" }, + { url = "https://files.pythonhosted.org/packages/6d/95/67153a838c6b6ba7a2401241fd8a00cd8c627a8e4a0491b8d853dedeffe0/ruff-0.9.7-py3-none-win_amd64.whl", hash = "sha256:3770fe52b9d691a15f0b87ada29c45324b2ace8f01200fb0c14845e499eb0c2c", size = 11002987, upload-time = "2025-02-20T13:26:43.762Z" }, + { url = 
"https://files.pythonhosted.org/packages/63/6a/aca01554949f3a401991dc32fe22837baeaccb8a0d868256cbb26a029778/ruff-0.9.7-py3-none-win_arm64.whl", hash = "sha256:b075a700b2533feb7a01130ff656a4ec0d5f340bb540ad98759b8401c32c2037", size = 10177763, upload-time = "2025-02-20T13:26:48.92Z" }, +] + +[[package]] +name = "scipy" +version = "1.15.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/0f/37/6964b830433e654ec7485e45a00fc9a27cf868d622838f6b6d9c5ec0d532/scipy-1.15.3.tar.gz", hash = "sha256:eae3cf522bc7df64b42cad3925c876e1b0b6c35c1337c93e12c0f366f55b0eaf", size = 59419214, upload-time = "2025-05-08T16:13:05.955Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/4b/683aa044c4162e10ed7a7ea30527f2cbd92e6999c10a8ed8edb253836e9c/scipy-1.15.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6ac6310fdbfb7aa6612408bd2f07295bcbd3fda00d2d702178434751fe48e019", size = 38766735, upload-time = "2025-05-08T16:06:06.471Z" }, + { url = "https://files.pythonhosted.org/packages/7b/7e/f30be3d03de07f25dc0ec926d1681fed5c732d759ac8f51079708c79e680/scipy-1.15.3-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:185cd3d6d05ca4b44a8f1595af87f9c372bb6acf9c808e99aa3e9aa03bd98cf6", size = 30173284, upload-time = "2025-05-08T16:06:11.686Z" }, + { url = "https://files.pythonhosted.org/packages/07/9c/0ddb0d0abdabe0d181c1793db51f02cd59e4901da6f9f7848e1f96759f0d/scipy-1.15.3-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:05dc6abcd105e1a29f95eada46d4a3f251743cfd7d3ae8ddb4088047f24ea477", size = 22446958, upload-time = "2025-05-08T16:06:15.97Z" }, + { url = "https://files.pythonhosted.org/packages/af/43/0bce905a965f36c58ff80d8bea33f1f9351b05fad4beaad4eae34699b7a1/scipy-1.15.3-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:06efcba926324df1696931a57a176c80848ccd67ce6ad020c810736bfd58eb1c", size = 25242454, upload-time = "2025-05-08T16:06:20.394Z" }, + { url = "https://files.pythonhosted.org/packages/56/30/a6f08f84ee5b7b28b4c597aca4cbe545535c39fe911845a96414700b64ba/scipy-1.15.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05045d8b9bfd807ee1b9f38761993297b10b245f012b11b13b91ba8945f7e45", size = 35210199, upload-time = "2025-05-08T16:06:26.159Z" }, + { url = "https://files.pythonhosted.org/packages/0b/1f/03f52c282437a168ee2c7c14a1a0d0781a9a4a8962d84ac05c06b4c5b555/scipy-1.15.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271e3713e645149ea5ea3e97b57fdab61ce61333f97cfae392c28ba786f9bb49", size = 37309455, upload-time = "2025-05-08T16:06:32.778Z" }, + { url = "https://files.pythonhosted.org/packages/89/b1/fbb53137f42c4bf630b1ffdfc2151a62d1d1b903b249f030d2b1c0280af8/scipy-1.15.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6cfd56fc1a8e53f6e89ba3a7a7251f7396412d655bca2aa5611c8ec9a6784a1e", size = 36885140, upload-time = "2025-05-08T16:06:39.249Z" }, + { url = "https://files.pythonhosted.org/packages/2e/2e/025e39e339f5090df1ff266d021892694dbb7e63568edcfe43f892fa381d/scipy-1.15.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = 
"sha256:0ff17c0bb1cb32952c09217d8d1eed9b53d1463e5f1dd6052c7857f83127d539", size = 39710549, upload-time = "2025-05-08T16:06:45.729Z" }, + { url = "https://files.pythonhosted.org/packages/e6/eb/3bf6ea8ab7f1503dca3a10df2e4b9c3f6b3316df07f6c0ded94b281c7101/scipy-1.15.3-cp312-cp312-win_amd64.whl", hash = "sha256:52092bc0472cfd17df49ff17e70624345efece4e1a12b23783a1ac59a1b728ed", size = 40966184, upload-time = "2025-05-08T16:06:52.623Z" }, +] + +[[package]] +name = "semantic-version" +version = "2.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/7d/31/f2289ce78b9b473d582568c234e104d2a342fd658cc288a7553d83bb8595/semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c", size = 52289, upload-time = "2022-05-26T13:35:23.454Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/23/8146aad7d88f4fcb3a6218f41a60f6c2d4e3a72de72da1825dc7c8f7877c/semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177", size = 15552, upload-time = "2022-05-26T13:35:21.206Z" }, +] + +[[package]] +name = "setuptools" +version = "80.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/18/5d/3bf57dcd21979b887f014ea83c24ae194cfcd12b9e0fda66b957c69d1fca/setuptools-80.9.0.tar.gz", hash = "sha256:f36b47402ecde768dbfafc46e8e4207b4360c654f1f3bb84475f0a28628fb19c", size = 1319958, upload-time = "2025-05-27T00:56:51.443Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a3/dc/17031897dae0efacfea57dfd3a82fdd2a2aeb58e0ff71b77b87e44edc772/setuptools-80.9.0-py3-none-any.whl", hash = "sha256:062d34222ad13e0cc312a4c02d73f059e86a4acbfbdea8f8f76b28c99f306922", size = 1201486, upload-time = "2025-05-27T00:56:49.664Z" }, +] + +[[package]] +name = "shapely" +version = "2.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ca/3c/2da625233f4e605155926566c0e7ea8dda361877f48e8b1655e53456f252/shapely-2.1.1.tar.gz", hash = "sha256:500621967f2ffe9642454808009044c21e5b35db89ce69f8a2042c2ffd0e2772", size = 315422, upload-time = "2025-05-19T11:04:41.265Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/64/9544dc07dfe80a2d489060791300827c941c451e2910f7364b19607ea352/shapely-2.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2827365b58bf98efb60affc94a8e01c56dd1995a80aabe4b701465d86dcbba43", size = 1833021, upload-time = "2025-05-19T11:04:08.022Z" }, + { url = "https://files.pythonhosted.org/packages/07/aa/fb5f545e72e89b6a0f04a0effda144f5be956c9c312c7d4e00dfddbddbcf/shapely-2.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9c551f7fa7f1e917af2347fe983f21f212863f1d04f08eece01e9c275903fad", size = 1643018, upload-time = "2025-05-19T11:04:09.343Z" }, + { url = "https://files.pythonhosted.org/packages/03/46/61e03edba81de729f09d880ce7ae5c1af873a0814206bbfb4402ab5c3388/shapely-2.1.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:78dec4d4fbe7b1db8dc36de3031767e7ece5911fb7782bc9e95c5cdec58fb1e9", size = 2986417, upload-time = "2025-05-19T11:04:10.56Z" }, + { url = "https://files.pythonhosted.org/packages/1f/1e/83ec268ab8254a446b4178b45616ab5822d7b9d2b7eb6e27cf0b82f45601/shapely-2.1.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:872d3c0a7b8b37da0e23d80496ec5973c4692920b90de9f502b5beb994bbaaef", size = 3098224, upload-time = "2025-05-19T11:04:11.903Z" }, + { url = "https://files.pythonhosted.org/packages/f1/44/0c21e7717c243e067c9ef8fa9126de24239f8345a5bba9280f7bb9935959/shapely-2.1.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2e2b9125ebfbc28ecf5353511de62f75a8515ae9470521c9a693e4bb9fbe0cf1", size = 3925982, upload-time = "2025-05-19T11:04:13.224Z" }, + { url = "https://files.pythonhosted.org/packages/15/50/d3b4e15fefc103a0eb13d83bad5f65cd6e07a5d8b2ae920e767932a247d1/shapely-2.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:4b96cea171b3d7f6786976a0520f178c42792897653ecca0c5422fb1e6946e6d", size = 4089122, upload-time = "2025-05-19T11:04:14.477Z" }, + { url = "https://files.pythonhosted.org/packages/bd/05/9a68f27fc6110baeedeeebc14fd86e73fa38738c5b741302408fb6355577/shapely-2.1.1-cp312-cp312-win32.whl", hash = "sha256:39dca52201e02996df02e447f729da97cfb6ff41a03cb50f5547f19d02905af8", size = 1522437, upload-time = "2025-05-19T11:04:16.203Z" }, + { url = "https://files.pythonhosted.org/packages/bc/e9/a4560e12b9338842a1f82c9016d2543eaa084fce30a1ca11991143086b57/shapely-2.1.1-cp312-cp312-win_amd64.whl", hash = "sha256:13d643256f81d55a50013eff6321142781cf777eb6a9e207c2c9e6315ba6044a", size = 1703479, upload-time = "2025-05-19T11:04:18.497Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "smmap" +version = "5.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/cd/a040c4b3119bbe532e5b0732286f805445375489fceaec1f48306068ee3b/smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5", size = 22329, upload-time = "2025-01-02T07:14:40.909Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303, upload-time = "2025-01-02T07:14:38.724Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "sqlparse" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e5/40/edede8dd6977b0d3da179a342c198ed100dd2aba4be081861ee5911e4da4/sqlparse-0.5.3.tar.gz", hash = "sha256:09f67787f56a0b16ecdbde1bfc7f5d9c3371ca683cfeaa8e6ff60b4807ec9272", size = 84999, upload-time = "2024-12-10T12:05:30.728Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/5c/bfd6bd0bf979426d405cc6e71eceb8701b148b16c21d2dc3c261efc61c7b/sqlparse-0.5.3-py3-none-any.whl", hash = "sha256:cf2196ed3418f3ba5de6af7e82c694a9fbdbfecccdfc72e281548517081f16ca", size = 44415, upload-time = "2024-12-10T12:05:27.824Z" }, +] + +[[package]] +name = "stack-data" +version = "0.6.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asttokens", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "executing", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pure-eval", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/28/e3/55dcc2cfbc3ca9c29519eb6884dd1415ecb53b0e934862d3559ddcb7e20b/stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9", size = 44707, upload-time = "2023-09-30T13:58:05.479Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f1/7b/ce1eafaf1a76852e2ec9b22edecf1daa58175c090266e9f6c64afcd81d91/stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695", size = 24521, upload-time = "2023-09-30T13:58:03.53Z" }, +] + +[[package]] +name = "starlette" +version = "0.48.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' 
and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/a5/d6f429d43394057b67a6b5bbe6eae2f77a6bf7459d961fdb224bf206eee6/starlette-0.48.0.tar.gz", hash = "sha256:7e8cee469a8ab2352911528110ce9088fdc6a37d9876926e73da7ce4aa4c7a46", size = 2652949, upload-time = "2025-09-13T08:41:05.699Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/72/2db2f49247d0a18b4f1bb9a5a39a0162869acf235f3a96418363947b3d46/starlette-0.48.0-py3-none-any.whl", hash = "sha256:0764ca97b097582558ecb498132ed0c7d942f233f365b86ba37770e026510659", size = 73736, upload-time = "2025-09-13T08:41:03.869Z" }, +] + +[[package]] +name = "statsmodels" +version = "0.14.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pandas", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "patsy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "scipy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/64/cc/8c1bf59bf8203dea1bf2ea811cfe667d7bcc6909c83d8afb02b08e30f50b/statsmodels-0.14.5.tar.gz", hash = "sha256:de260e58cccfd2ceddf835b55a357233d6ca853a1aa4f90f7553a52cc71c6ddf", size = 20525016, upload-time = "2025-07-07T12:14:23.195Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/5f/a5/fcc4f5f16355660ce7a1742e28a43e3a9391b492fc4ff29fdd6893e81c05/statsmodels-0.14.5-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:37e7364a39f9aa3b51d15a208c2868b90aadb8412f868530f5cba9197cb00eaa", size = 10042891, upload-time = "2025-07-07T12:13:41.671Z" }, + { url = "https://files.pythonhosted.org/packages/1c/6f/db0cf5efa48277ac6218d9b981c8fd5e63c4c43e0d9d65015fdc38eed0ef/statsmodels-0.14.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4263d7f4d0f1d5ac6eb4db22e1ee34264a14d634b9332c975c9d9109b6b46e12", size = 9698912, upload-time = "2025-07-07T12:07:54.674Z" }, + { url = "https://files.pythonhosted.org/packages/4a/93/4ddc3bc4a59c51e6a57c49df1b889882c40d9e141e855b3517f6a8de3232/statsmodels-0.14.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:86224f6e36f38486e471e75759d241fe2912d8bc25ab157d54ee074c6aedbf45", size = 10237801, upload-time = "2025-07-07T14:23:12.593Z" }, + { url = "https://files.pythonhosted.org/packages/66/de/dc6bf2f6e8c8eb4c5815560ebdbdf2d69a767bc0f65fde34bc086cf5b36d/statsmodels-0.14.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c3dd760a6fa80cd5e0371685c697bb9c2c0e6e1f394d975e596a1e6d0bbb9372", size = 10424154, upload-time = "2025-07-07T14:23:25.365Z" }, + { url = "https://files.pythonhosted.org/packages/16/4f/2d5a8d14bebdf2b03b3ea89b8c6a2c837bb406ba5b7a41add8bd303bce29/statsmodels-0.14.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6264fb00e02f858b86bd01ef2dc05055a71d4a0cc7551b9976b07b0f0e6cf24f", size = 10652915, upload-time = "2025-07-07T14:23:39.337Z" }, + { url = "https://files.pythonhosted.org/packages/df/4c/2feda3a9f0e17444a84ba5398ada6a4d2e1b8f832760048f04e2b8ea0c41/statsmodels-0.14.5-cp312-cp312-win_amd64.whl", hash = "sha256:b2ed065bfbaf8bb214c7201656df840457c2c8c65e1689e3eb09dc7440f9c61c", size = 9611236, upload-time = "2025-07-07T12:08:06.794Z" }, +] + +[[package]] +name = "sympy" +version = "1.13.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mpmath", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ca/99/5a5b6f19ff9f083671ddf7b9632028436167cd3d33e11015754e41b249a4/sympy-1.13.1.tar.gz", hash = "sha256:9cebf7e04ff162015ce31c9c6c9144daa34a93bd082f54fd8f12deca4f47515f", size = 7533040, upload-time = "2024-07-19T09:26:51.238Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b2/fe/81695a1aa331a842b582453b605175f419fe8540355886031328089d840a/sympy-1.13.1-py3-none-any.whl", hash = "sha256:db36cdc64bf61b9b24578b6f7bab1ecdd2452cf008f34faa33776680c26d66f8", size = 6189177, upload-time = "2024-07-19T09:26:48.863Z" }, +] + +[[package]] +name = "tensorboard" +version = "2.20.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "absl-py", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "grpcio", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 
'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "markdown", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pillow", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "protobuf", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "setuptools", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tensorboard-data-server", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "werkzeug", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 
'extra-10-weathergen-gpu')" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/d9/a5db55f88f258ac669a92858b70a714bbbd5acd993820b41ec4a96a4d77f/tensorboard-2.20.0-py3-none-any.whl", hash = "sha256:9dc9f978cb84c0723acf9a345d96c184f0293d18f166bb8d59ee098e6cfaaba6", size = 5525680, upload-time = "2025-07-17T19:20:49.638Z" }, +] + +[[package]] +name = "tensorboard-data-server" +version = "0.7.2" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7a/13/e503968fefabd4c6b2650af21e110aa8466fe21432cd7c43a84577a89438/tensorboard_data_server-0.7.2-py3-none-any.whl", hash = "sha256:7e0610d205889588983836ec05dc098e80f97b7e7bbff7e994ebb78f578d0ddb", size = 2356, upload-time = "2023-10-23T21:23:32.16Z" }, + { url = "https://files.pythonhosted.org/packages/b7/85/dabeaf902892922777492e1d253bb7e1264cadce3cea932f7ff599e53fea/tensorboard_data_server-0.7.2-py3-none-macosx_10_9_x86_64.whl", hash = "sha256:9fe5d24221b29625dbc7328b0436ca7fc1c23de4acf4d272f1180856e32f9f60", size = 4823598, upload-time = "2023-10-23T21:23:33.714Z" }, + { url = "https://files.pythonhosted.org/packages/73/c6/825dab04195756cf8ff2e12698f22513b3db2f64925bdd41671bfb33aaa5/tensorboard_data_server-0.7.2-py3-none-manylinux_2_31_x86_64.whl", hash = "sha256:ef687163c24185ae9754ed5650eb5bc4d84ff257aabdc33f0cc6f74d8ba54530", size = 6590363, upload-time = "2023-10-23T21:23:35.583Z" }, +] + +[[package]] +name = "toolz" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/0b/d80dfa675bf592f636d1ea0b835eab4ec8df6e9415d8cfd766df54456123/toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02", size = 66790, upload-time = "2024-10-04T16:17:04.001Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/03/98/eb27cc78ad3af8e302c9d8ff4977f5026676e130d28dd7578132a457170c/toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236", size = 56383, upload-time = "2024-10-04T16:17:01.533Z" }, +] + +[[package]] +name = "torch" +version = "2.6.0+cpu" +source = { registry = "https://download.pytorch.org/whl/cpu" } +resolution-markers = [ + "platform_machine == 'aarch64' and sys_platform == 'linux'", + "platform_machine == 'x86_64' and sys_platform == 'linux'", +] +dependencies = [ + { name = "filelock", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux')" }, + { name = "fsspec", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux')" }, + { name = "jinja2", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux')" }, + { name = "networkx", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux')" }, + { name = "setuptools", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux')" }, + { name = "sympy", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux')" }, + { name = "typing-extensions", marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux')" }, +] +wheels = [ + { url 
= "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp312-cp312-linux_x86_64.whl", hash = "sha256:59e78aa0c690f70734e42670036d6b541930b8eabbaa18d94e090abf14cc4d91" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:318290e8924353c61b125cdc8768d15208704e279e7757c113b9620740deca98" }, + { url = "https://download.pytorch.org/whl/cpu/torch-2.6.0%2Bcpu-cp312-cp312-win_amd64.whl", hash = "sha256:4027d982eb2781c93825ab9527f17fbbb12dbabf422298e4b954be60016f87d8" }, +] + +[[package]] +name = "torch" +version = "2.6.0+cu126" +source = { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-linux_aarch64.whl" } +resolution-markers = [ + "platform_machine == 'aarch64' and sys_platform == 'linux'", +] +dependencies = [ + { name = "filelock", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "fsspec", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "jinja2", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "networkx", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "setuptools", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "sympy", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, + { name = "typing-extensions", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-linux_aarch64.whl", hash = "sha256:993e0e99c472df1d2746c3233ef8e88d992904fe75b8996a2c15439c43ff46c4" }, +] + +[package.metadata] +requires-dist = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "jinja2" }, + { name = "networkx" }, + { name = "opt-einsum", marker = "extra == 'opt-einsum'", specifier = ">=3.3" }, + { name = "optree", marker = "extra == 'optree'", specifier = ">=0.13.0" }, + { name = "setuptools", marker = "python_full_version >= '3.12'" }, + { name = "sympy", marker = "python_full_version >= '3.9'", specifier = "==1.13.1" }, + { name = "typing-extensions", specifier = ">=4.10.0" }, +] +provides-extras = ["optree", "opt-einsum"] + +[[package]] +name = "torch" +version = "2.6.0+cu126" +source = { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-manylinux_2_28_x86_64.whl" } +resolution-markers = [ + "platform_machine == 'x86_64' and sys_platform == 'linux'", +] +dependencies = [ + { name = "filelock", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "fsspec", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "jinja2", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "networkx", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = 
"nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "setuptools", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "sympy", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, + { name = "typing-extensions", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'" }, +] +wheels = [ + { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:6bc5b9126daa3ac1e4d920b731da9f9503ff1f56204796de124e080f5cc3570e" }, +] + +[package.metadata] +requires-dist = [ + { name = "filelock" }, + { name = "fsspec" }, + { name = "jinja2" }, + { name = "networkx" }, + { name = "nvidia-cublas-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==12.6.4.1" }, + { name = "nvidia-cuda-cupti-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==12.6.80" }, + { name = "nvidia-cuda-nvrtc-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==12.6.77" }, + { name = "nvidia-cuda-runtime-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==12.6.77" }, + { name = "nvidia-cudnn-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==9.5.1.17" }, + { name = "nvidia-cufft-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==11.3.0.4" }, + { name = "nvidia-curand-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==10.3.7.77" }, + { name = "nvidia-cusolver-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==11.7.1.2" }, + { name = "nvidia-cusparse-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==12.5.4.2" }, + { name = "nvidia-cusparselt-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==0.6.3" }, + { name = "nvidia-nccl-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==2.21.5" }, + { name = "nvidia-nvjitlink-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==12.6.85" }, + { name = "nvidia-nvtx-cu12", marker = "platform_machine == 'x86_64' and sys_platform == 'linux'", specifier = "==12.6.77" }, + { name = "opt-einsum", marker = "extra == 'opt-einsum'", specifier = ">=3.3" }, + { name = "optree", marker = "extra == 'optree'", specifier = ">=0.13.0" }, + { name = "setuptools", marker = "python_full_version >= '3.12'" }, + { name = "sympy", marker = "python_full_version >= '3.9'", specifier = "==1.13.1" }, + { name = "triton", marker = "platform_machine == 'x86_64' and sys_platform == 
'linux'", specifier = "==3.2.0" }, + { name = "typing-extensions", specifier = ">=4.10.0" }, +] +provides-extras = ["optree", "opt-einsum"] + +[[package]] +name = "tornado" +version = "6.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/89/c72771c81d25d53fe33e3dca61c233b665b2780f21820ba6fd2c6793c12b/tornado-6.5.1.tar.gz", hash = "sha256:84ceece391e8eb9b2b95578db65e920d2a61070260594819589609ba9bc6308c", size = 509934, upload-time = "2025-05-22T18:15:38.788Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/89/f4532dee6843c9e0ebc4e28d4be04c67f54f60813e4bf73d595fe7567452/tornado-6.5.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d50065ba7fd11d3bd41bcad0825227cc9a95154bad83239357094c36708001f7", size = 441948, upload-time = "2025-05-22T18:15:20.862Z" }, + { url = "https://files.pythonhosted.org/packages/15/9a/557406b62cffa395d18772e0cdcf03bed2fff03b374677348eef9f6a3792/tornado-6.5.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9e9ca370f717997cb85606d074b0e5b247282cf5e2e1611568b8821afe0342d6", size = 440112, upload-time = "2025-05-22T18:15:22.591Z" }, + { url = "https://files.pythonhosted.org/packages/55/82/7721b7319013a3cf881f4dffa4f60ceff07b31b394e459984e7a36dc99ec/tornado-6.5.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b77e9dfa7ed69754a54c89d82ef746398be82f749df69c4d3abe75c4d1ff4888", size = 443672, upload-time = "2025-05-22T18:15:24.027Z" }, + { url = "https://files.pythonhosted.org/packages/7d/42/d11c4376e7d101171b94e03cef0cbce43e823ed6567ceda571f54cf6e3ce/tornado-6.5.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253b76040ee3bab8bcf7ba9feb136436a3787208717a1fb9f2c16b744fba7331", size = 443019, upload-time = "2025-05-22T18:15:25.735Z" }, + { url = "https://files.pythonhosted.org/packages/7d/f7/0c48ba992d875521ac761e6e04b0a1750f8150ae42ea26df1852d6a98942/tornado-6.5.1-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:308473f4cc5a76227157cdf904de33ac268af770b2c5f05ca6c1161d82fdd95e", size = 443252, upload-time = "2025-05-22T18:15:27.499Z" }, + { url = "https://files.pythonhosted.org/packages/89/46/d8d7413d11987e316df4ad42e16023cd62666a3c0dfa1518ffa30b8df06c/tornado-6.5.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:caec6314ce8a81cf69bd89909f4b633b9f523834dc1a352021775d45e51d9401", size = 443930, upload-time = "2025-05-22T18:15:29.299Z" }, + { url = "https://files.pythonhosted.org/packages/78/b2/f8049221c96a06df89bed68260e8ca94beca5ea532ffc63b1175ad31f9cc/tornado-6.5.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:13ce6e3396c24e2808774741331638ee6c2f50b114b97a55c5b442df65fd9692", size = 443351, upload-time = "2025-05-22T18:15:31.038Z" }, + { url = "https://files.pythonhosted.org/packages/76/ff/6a0079e65b326cc222a54720a748e04a4db246870c4da54ece4577bfa702/tornado-6.5.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5cae6145f4cdf5ab24744526cc0f55a17d76f02c98f4cff9daa08ae9a217448a", size = 443328, upload-time = "2025-05-22T18:15:32.426Z" }, + { url = "https://files.pythonhosted.org/packages/49/18/e3f902a1d21f14035b5bc6246a8c0f51e0eef562ace3a2cea403c1fb7021/tornado-6.5.1-cp39-abi3-win32.whl", hash = "sha256:e0a36e1bc684dca10b1aa75a31df8bdfed656831489bc1e6a6ebed05dc1ec365", size = 444396, upload-time = "2025-05-22T18:15:34.205Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/09/6526e32bf1049ee7de3bebba81572673b19a2a8541f795d887e92af1a8bc/tornado-6.5.1-cp39-abi3-win_amd64.whl", hash = "sha256:908e7d64567cecd4c2b458075589a775063453aeb1d2a1853eedb806922f568b", size = 444840, upload-time = "2025-05-22T18:15:36.1Z" }, + { url = "https://files.pythonhosted.org/packages/55/a7/535c44c7bea4578e48281d83c615219f3ab19e6abc67625ef637c73987be/tornado-6.5.1-cp39-abi3-win_arm64.whl", hash = "sha256:02420a0eb7bf617257b9935e2b754d1b63897525d8a289c9d65690d580b4dcf7", size = 443596, upload-time = "2025-05-22T18:15:37.433Z" }, +] + +[[package]] +name = "tqdm" +version = "4.67.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, +] + +[[package]] +name = "traitlets" +version = "5.14.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/79/72064e6a701c2183016abbbfedaba506d81e30e232a68c9f0d6f6fcd1574/traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7", size = 161621, upload-time = "2024-04-19T11:11:49.746Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/00/c0/8f5d070730d7836adc9c9b6408dec68c6ced86b304a9b26a14df072a6e8c/traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f", size = 85359, upload-time = "2024-04-19T11:11:46.763Z" }, +] + +[[package]] +name = "triton" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/06/00/59500052cb1cf8cf5316be93598946bc451f14072c6ff256904428eaf03c/triton-3.2.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d9b215efc1c26fa7eefb9a157915c92d52e000d2bf83e5f69704047e63f125c", size = 253159365, upload-time = "2025-01-22T19:13:24.648Z" }, +] + +[[package]] +name = "trove-classifiers" +version = "2025.5.9.12" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/38/04/1cd43f72c241fedcf0d9a18d0783953ee301eac9e5d9db1df0f0f089d9af/trove_classifiers-2025.5.9.12.tar.gz", hash = "sha256:7ca7c8a7a76e2cd314468c677c69d12cc2357711fcab4a60f87994c1589e5cb5", size = 16940, upload-time = "2025-05-09T12:04:48.829Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/92/ef/c6deb083748be3bcad6f471b6ae983950c161890bf5ae1b2af80cc56c530/trove_classifiers-2025.5.9.12-py3-none-any.whl", hash = "sha256:e381c05537adac78881c8fa345fd0e9970159f4e4a04fcc42cfd3129cca640ce", size = 14119, upload-time = "2025-05-09T12:04:46.38Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423, 
upload-time = "2025-06-02T14:52:11.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, +] + +[[package]] +name = "uc-micro-py" +version = "1.0.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043, upload-time = "2024-02-09T16:52:01.654Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" }, +] + +[[package]] +name = "urllib3" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = 
"sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.38.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "h11", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cb/ce/f06b84e2697fef4688ca63bdb2fdf113ca0a3be33f94488f2cadb690b0cf/uvicorn-0.38.0.tar.gz", hash = "sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d", size = 80605, upload-time = "2025-10-18T13:46:44.63Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ee/d9/d88e73ca598f4f6ff671fb5fde8a32925c2e08a637303a1d12883c7305fa/uvicorn-0.38.0-py3-none-any.whl", hash = "sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02", size = 68109, upload-time = "2025-10-18T13:46:42.958Z" }, +] + +[[package]] +name = "wcwidth" +version = "0.2.13" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/63/53559446a878410fc5a5974feb13d31d78d752eb18aeba59c7fef1af7598/wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5", size = 101301, upload-time = "2024-01-06T02:10:57.829Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166, upload-time = "2024-01-06T02:10:55.763Z" }, +] + +[[package]] +name = "weathergen" +version = "0.1.0" +source = { editable = "." 
} +dependencies = [ + { name = "anemoi-datasets", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "astropy-healpix", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "dask", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "hatchling", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "matplotlib", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numexpr", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "omegaconf", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform 
== 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pandas", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "polars", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "psutil", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pynvml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tqdm", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "weathergen-common", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "weathergen-evaluate", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "weathergen-readers-extra", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "wheel", marker = "(platform_machine 
!= 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "zarr", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.optional-dependencies] +cpu = [ + { name = "torch", version = "2.6.0+cpu", source = { registry = "https://download.pytorch.org/whl/cpu" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux')" }, +] +gpu = [ + { name = "flash-attn", version = "2.7.3", source = { url = "https://object-store.os-api.cci1.ecmwf.int/weathergenerator-dev/wheels/flash_attn-2.7.3-cp312-cp312-linux_aarch64.whl" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux' and extra == 'extra-10-weathergen-gpu') or (platform_machine != 'aarch64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "flash-attn", version = "2.7.4.post1", source = { url = "https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiTRUE-cp312-cp312-linux_x86_64.whl" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-10-weathergen-gpu') or (platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "torch", version = "2.6.0+cu126", source = { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-linux_aarch64.whl" }, marker = "(platform_machine == 'aarch64' and sys_platform == 'linux' and extra == 'extra-10-weathergen-gpu') or (platform_machine != 'aarch64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "torch", version = "2.6.0+cu126", source = { url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-manylinux_2_28_x86_64.whl" }, marker = "(platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'extra-10-weathergen-gpu') or (platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.dev-dependencies] +dev = [ + { name = "ipykernel", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 
'extra-10-weathergen-gpu')" }, + { name = "jupytext", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pdbpp", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyrefly", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest-mock", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "ruff", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "tensorboard", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.metadata] +requires-dist = [ + { name = "anemoi-datasets", specifier = "~=0.5.16" }, + { name = "astropy-healpix", specifier = "~=1.1.2" }, + { name = "dask", specifier = "~=2025.5.1" }, + { name = "flash-attn", marker = "platform_machine == 'aarch64' and sys_platform == 'linux' and extra == 'gpu'", url = "https://object-store.os-api.cci1.ecmwf.int/weathergenerator-dev/wheels/flash_attn-2.7.3-cp312-cp312-linux_aarch64.whl" }, + { name = "flash-attn", marker = "platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'gpu'", url = 
"https://github.com/Dao-AILab/flash-attention/releases/download/v2.7.4.post1/flash_attn-2.7.4.post1+cu12torch2.6cxx11abiTRUE-cp312-cp312-linux_x86_64.whl" }, + { name = "flash-attn", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'gpu') or (sys_platform != 'linux' and extra == 'gpu')" }, + { name = "hatchling" }, + { name = "matplotlib" }, + { name = "numexpr", specifier = ">=2.11.0" }, + { name = "numpy", specifier = "~=2.2" }, + { name = "omegaconf", specifier = "~=2.3.0" }, + { name = "packaging" }, + { name = "pandas", specifier = "~=2.2" }, + { name = "polars", specifier = "~=1.25.2" }, + { name = "psutil" }, + { name = "pynvml" }, + { name = "torch", marker = "platform_machine == 'aarch64' and sys_platform == 'linux' and extra == 'gpu'", url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-linux_aarch64.whl" }, + { name = "torch", marker = "platform_machine == 'x86_64' and sys_platform == 'linux' and extra == 'gpu'", url = "https://download.pytorch.org/whl/cu126/torch-2.6.0%2Bcu126-cp312-cp312-manylinux_2_28_x86_64.whl" }, + { name = "torch", marker = "sys_platform == 'linux' and extra == 'cpu'", specifier = "==2.6.0", index = "https://download.pytorch.org/whl/cpu", conflict = { package = "weathergen", extra = "cpu" } }, + { name = "torch", marker = "sys_platform != 'linux' and extra == 'cpu'", specifier = "==2.6.0" }, + { name = "torch", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'gpu') or (sys_platform != 'linux' and extra == 'gpu')", specifier = "==2.6.0+cu126" }, + { name = "tqdm" }, + { name = "weathergen-common", editable = "packages/common" }, + { name = "weathergen-evaluate", editable = "packages/evaluate" }, + { name = "weathergen-readers-extra", editable = "packages/readers_extra" }, + { name = "wheel" }, + { name = "zarr", specifier = "~=2.17" }, +] +provides-extras = ["cpu", "gpu"] + +[package.metadata.requires-dev] +dev = [ + { name = "ipykernel", specifier = ">=6.30.0" }, + { name = "jupytext", specifier = ">=1.17.2" }, + { name = "pdbpp", specifier = ">=0.11.7" }, + { name = "pyrefly", specifier = "==0.36.0" }, + { name = "pytest", specifier = "~=8.3.5" }, + { name = "pytest-mock", specifier = ">=3.14.1" }, + { name = "ruff", specifier = "==0.9.7" }, + { name = "tensorboard", specifier = ">=2.20.0" }, +] + +[[package]] +name = "weathergen-common" +version = "0.1.0" +source = { editable = "packages/common" } +dependencies = [ + { name = "astropy-healpix", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "dask", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numcodecs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 
'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "omegaconf", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pyyaml", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "xarray", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "zarr", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pyrefly", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest-mock", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "ruff", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.metadata] +requires-dist = [ + 
{ name = "astropy-healpix", specifier = "~=1.1.2" }, + { name = "dask", specifier = ">=2024.9.1" }, + { name = "numcodecs", specifier = "<0.16.0" }, + { name = "omegaconf", specifier = "~=2.3.0" }, + { name = "pyyaml" }, + { name = "xarray", specifier = ">=2025.6.1" }, + { name = "zarr", specifier = "==2.18.4,<3" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pyrefly", specifier = "==0.36.0" }, + { name = "pytest", specifier = "~=8.3.5" }, + { name = "pytest-mock", specifier = ">=3.14.1" }, + { name = "ruff", specifier = "==0.9.7" }, +] + +[[package]] +name = "weathergen-evaluate" +version = "0.1.0" +source = { editable = "packages/evaluate" } +dependencies = [ + { name = "cartopy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "omegaconf", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "panel", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "plotly", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "weathergen-common", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "weathergen-metrics", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "xhistogram", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 
'extra-10-weathergen-gpu')" }, + { name = "xskillscore", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pyrefly", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest-mock", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "ruff", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.metadata] +requires-dist = [ + { name = "cartopy", specifier = ">=0.24.1" }, + { name = "omegaconf" }, + { name = "panel" }, + { name = "plotly", specifier = ">=6.2.0" }, + { name = "weathergen-common", editable = "packages/common" }, + { name = "weathergen-metrics", editable = "packages/metrics" }, + { name = "xhistogram" }, + { name = "xskillscore" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pyrefly", specifier = "==0.36.0" }, + { name = "pytest", specifier = "~=8.3.5" }, + { name = "pytest-mock", specifier = ">=3.14.1" }, + { name = "ruff", specifier = "==0.9.7" }, +] + +[[package]] +name = "weathergen-metrics" +version = "0.1.0" +source = { editable = "packages/metrics" } +dependencies = [ + { name = "mlflow-skinny", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "weathergen-common", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine 
== 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pyrefly", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest-mock", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "ruff", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.metadata] +requires-dist = [ + { name = "mlflow-skinny" }, + { name = "weathergen-common", editable = "packages/common" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pyrefly", specifier = "==0.36.0" }, + { name = "pytest", specifier = "~=8.3.5" }, + { name = "pytest-mock", specifier = ">=3.14.1" }, + { name = "ruff", specifier = "==0.9.7" }, +] + +[[package]] +name = "weathergen-readers-extra" +version = "0.1.0" +source = { editable = "packages/readers_extra" } +dependencies = [ + { name = "weathergen-common", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "xarray", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "zarr", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and 
extra == 'extra-10-weathergen-gpu')" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pyrefly", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pytest-mock", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "ruff", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] + +[package.metadata] +requires-dist = [ + { name = "weathergen-common", editable = "packages/common" }, + { name = "xarray" }, + { name = "zarr" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pyrefly", specifier = "==0.36.0" }, + { name = "pytest", specifier = "~=8.3.5" }, + { name = "pytest-mock", specifier = ">=3.14.1" }, + { name = "ruff", specifier = "==0.9.7" }, +] + +[[package]] +name = "webencodings" +version = "0.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721, upload-time = "2017-04-05T20:21:34.189Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774, upload-time = "2017-04-05T20:21:32.581Z" }, +] + +[[package]] +name = "werkzeug" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/69/83029f1f6300c5fb2471d621ab06f6ec6b3324685a2ce0f9777fd4a8b71e/werkzeug-3.1.3.tar.gz", hash = 
"sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746", size = 806925, upload-time = "2024-11-08T15:52:18.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498, upload-time = "2024-11-08T15:52:16.132Z" }, +] + +[[package]] +name = "wheel" +version = "0.45.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/98/2d9906746cdc6a6ef809ae6338005b3f21bb568bea3165cfc6a243fdc25c/wheel-0.45.1.tar.gz", hash = "sha256:661e1abd9198507b1409a20c02106d9670b2576e916d58f520316666abca6729", size = 107545, upload-time = "2024-11-23T00:18:23.513Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/2c/87f3254fd8ffd29e4c02732eee68a83a1d3c346ae39bc6822dcbcb697f2b/wheel-0.45.1-py3-none-any.whl", hash = "sha256:708e7481cc80179af0e556bbf0cc00b8444c7321e2700b8d8580231d13017248", size = 72494, upload-time = "2024-11-23T00:18:21.207Z" }, +] + +[[package]] +name = "wrapt" +version = "1.17.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c3/fc/e91cc220803d7bc4db93fb02facd8461c37364151b8494762cc88b0fbcef/wrapt-1.17.2.tar.gz", hash = "sha256:41388e9d4d1522446fe79d3213196bd9e3b301a336965b9e27ca2788ebd122f3", size = 55531, upload-time = "2025-01-14T10:35:45.465Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/bd/ab55f849fd1f9a58ed7ea47f5559ff09741b25f00c191231f9f059c83949/wrapt-1.17.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d5e2439eecc762cd85e7bd37161d4714aa03a33c5ba884e26c81559817ca0925", size = 53799, upload-time = "2025-01-14T10:33:57.4Z" }, + { url = "https://files.pythonhosted.org/packages/53/18/75ddc64c3f63988f5a1d7e10fb204ffe5762bc663f8023f18ecaf31a332e/wrapt-1.17.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fc7cb4c1c744f8c05cd5f9438a3caa6ab94ce8344e952d7c45a8ed59dd88392", size = 38821, upload-time = "2025-01-14T10:33:59.334Z" }, + { url = "https://files.pythonhosted.org/packages/48/2a/97928387d6ed1c1ebbfd4efc4133a0633546bec8481a2dd5ec961313a1c7/wrapt-1.17.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8fdbdb757d5390f7c675e558fd3186d590973244fab0c5fe63d373ade3e99d40", size = 38919, upload-time = "2025-01-14T10:34:04.093Z" }, + { url = "https://files.pythonhosted.org/packages/73/54/3bfe5a1febbbccb7a2f77de47b989c0b85ed3a6a41614b104204a788c20e/wrapt-1.17.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5bb1d0dbf99411f3d871deb6faa9aabb9d4e744d67dcaaa05399af89d847a91d", size = 88721, upload-time = "2025-01-14T10:34:07.163Z" }, + { url = "https://files.pythonhosted.org/packages/25/cb/7262bc1b0300b4b64af50c2720ef958c2c1917525238d661c3e9a2b71b7b/wrapt-1.17.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d18a4865f46b8579d44e4fe1e2bcbc6472ad83d98e22a26c963d46e4c125ef0b", size = 80899, upload-time = "2025-01-14T10:34:09.82Z" }, + { url = "https://files.pythonhosted.org/packages/2a/5a/04cde32b07a7431d4ed0553a76fdb7a61270e78c5fd5a603e190ac389f14/wrapt-1.17.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc570b5f14a79734437cb7b0500376b6b791153314986074486e0b0fa8d71d98", size = 89222, upload-time = "2025-01-14T10:34:11.258Z" }, + { url = 
"https://files.pythonhosted.org/packages/09/28/2e45a4f4771fcfb109e244d5dbe54259e970362a311b67a965555ba65026/wrapt-1.17.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6d9187b01bebc3875bac9b087948a2bccefe464a7d8f627cf6e48b1bbae30f82", size = 86707, upload-time = "2025-01-14T10:34:12.49Z" }, + { url = "https://files.pythonhosted.org/packages/c6/d2/dcb56bf5f32fcd4bd9aacc77b50a539abdd5b6536872413fd3f428b21bed/wrapt-1.17.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:9e8659775f1adf02eb1e6f109751268e493c73716ca5761f8acb695e52a756ae", size = 79685, upload-time = "2025-01-14T10:34:15.043Z" }, + { url = "https://files.pythonhosted.org/packages/80/4e/eb8b353e36711347893f502ce91c770b0b0929f8f0bed2670a6856e667a9/wrapt-1.17.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e8b2816ebef96d83657b56306152a93909a83f23994f4b30ad4573b00bd11bb9", size = 87567, upload-time = "2025-01-14T10:34:16.563Z" }, + { url = "https://files.pythonhosted.org/packages/17/27/4fe749a54e7fae6e7146f1c7d914d28ef599dacd4416566c055564080fe2/wrapt-1.17.2-cp312-cp312-win32.whl", hash = "sha256:468090021f391fe0056ad3e807e3d9034e0fd01adcd3bdfba977b6fdf4213ea9", size = 36672, upload-time = "2025-01-14T10:34:17.727Z" }, + { url = "https://files.pythonhosted.org/packages/15/06/1dbf478ea45c03e78a6a8c4be4fdc3c3bddea5c8de8a93bc971415e47f0f/wrapt-1.17.2-cp312-cp312-win_amd64.whl", hash = "sha256:ec89ed91f2fa8e3f52ae53cd3cf640d6feff92ba90d62236a81e4e563ac0e991", size = 38865, upload-time = "2025-01-14T10:34:19.577Z" }, + { url = "https://files.pythonhosted.org/packages/2d/82/f56956041adef78f849db6b289b282e72b55ab8045a75abad81898c28d19/wrapt-1.17.2-py3-none-any.whl", hash = "sha256:b18f2d1533a71f069c7f82d524a52599053d4c7166e9dd374ae2136b7f40f7c8", size = 23594, upload-time = "2025-01-14T10:35:44.018Z" }, +] + +[[package]] +name = "xarray" +version = "2025.6.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "packaging", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "pandas", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/19/ec/e50d833518f10b0c24feb184b209bb6856f25b919ba8c1f89678b930b1cd/xarray-2025.6.1.tar.gz", hash = "sha256:a84f3f07544634a130d7dc615ae44175419f4c77957a7255161ed99c69c7c8b0", size = 3003185, upload-time = "2025-06-12T03:04:09.099Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/82/8a/6b50c1dd2260d407c1a499d47cf829f59f07007e0dcebafdabb24d1d26a5/xarray-2025.6.1-py3-none-any.whl", hash = "sha256:8b988b47f67a383bdc3b04c5db475cd165e580134c1f1943d52aee4a9c97651b", size = 1314739, upload-time = "2025-06-12T03:04:06.708Z" }, +] + +[[package]] +name = "xhistogram" +version = "0.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dask", extra = ["array"], marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "xarray", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/02/a7/9530310c67ba4a9945ba8b451ed89b6d9b5c9d119c86bd0cedc2e1abe64f/xhistogram-0.3.2.tar.gz", hash = "sha256:56b0751e1469eaed81710f644c8ba5c574b51883baa2feee26a95f2f708f91a1", size = 52684, upload-time = "2022-09-20T05:08:03.703Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/08/1432dd10193a5d45294bd42042a5631259ee5a12cd2e9075350546d07a03/xhistogram-0.3.2-py3-none-any.whl", hash = "sha256:ad55330d55296d273b3370678223fde0f50085e04cb744c7b3b0bb7702a2c6bf", size = 20227, upload-time = "2022-09-20T05:08:01.428Z" }, +] + +[[package]] +name = "xskillscore" +version = "0.0.27" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "dask", extra = ["array"], marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "properscoring", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 
'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "scipy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "statsmodels", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "xarray", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "xhistogram", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a7/5b/b7522695f8fceaf95e01499b1e1ce46a748c0ada7bf2b136af46cbfcc8a1/xskillscore-0.0.27.tar.gz", hash = "sha256:89a9f728f0a6b45b88b4f3bdaebf1a541923b58fca2c0abcad274d971ec121b2", size = 218416, upload-time = "2025-07-14T17:41:00.529Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/fc/c921d1c2d274252c1b1cfe129a59df7d8766fca51f71a0398d19bb72fab3/xskillscore-0.0.27-py3-none-any.whl", hash = "sha256:860195cb10db7b6b27ae1d6131fb853b8adb40da579071df90c8df939ce553aa", size = 80091, upload-time = "2025-07-14T17:40:59.507Z" }, +] + +[[package]] +name = "xyzservices" +version = "2025.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/af/c0f7f97bb320d14c089476f487b81f733238cc5603e0914f2e409f49d589/xyzservices-2025.4.0.tar.gz", hash = "sha256:6fe764713648fac53450fbc61a3c366cb6ae5335a1b2ae0c3796b495de3709d8", size = 1134722, upload-time = "2025-04-25T10:38:09.669Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d6/7d/b77455d7c7c51255b2992b429107fab811b2e36ceaf76da1e55a045dc568/xyzservices-2025.4.0-py3-none-any.whl", hash = "sha256:8d4db9a59213ccb4ce1cf70210584f30b10795bff47627cdfb862b39ff6e10c9", size = 90391, upload-time = "2025-04-25T10:38:08.468Z" }, +] + +[[package]] +name = "zarr" +version = "2.18.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asciitree", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' 
and extra == 'extra-10-weathergen-gpu')" }, + { name = "fasteners", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numcodecs", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, + { name = "numpy", marker = "(platform_machine != 'aarch64' and platform_machine != 'x86_64' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu') or (platform_machine == 'aarch64' and sys_platform == 'linux') or (platform_machine == 'x86_64' and sys_platform == 'linux') or (sys_platform != 'linux' and extra == 'extra-10-weathergen-cpu' and extra == 'extra-10-weathergen-gpu')" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/21/d1/764ca5b66d91b20dede66aedc6eb9ede3adbe5c61779e7378a7ecb010e87/zarr-2.18.4.tar.gz", hash = "sha256:37790ededd0683ae1abe6ff90aa16c22543b3436810060f53d72c15e910c24bb", size = 3603684, upload-time = "2024-12-12T16:04:10.52Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/d1/c84022a44afc7b7ccc442fba3daee56bdd03593d91ee4bc245a08e4fcc55/zarr-2.18.4-py3-none-any.whl", hash = "sha256:2795e20aff91093ce7e4da36ab1a138aededbd8ab66bf01fd01512e61d31e5d1", size = 210600, upload-time = "2024-12-12T16:04:06.642Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] From b57c3241d4dea2d32681955404c3ff1834b82ef0 Mon Sep 17 00:00:00 2001 From: Timothy Hunter Date: Tue, 18 Nov 2025 17:53:56 +0100 Subject: [PATCH 03/24] [882] Project dashboard (#1251) * changes * ignore cache * chanegs * changes * Added Dev Container Folder * paths * changes * changes * comments * Changes * ruff format * skinny mlfulw * scaling issues * memory issues * memory issues * memory issues * moving new dir * moved files * updates * changes * update * link * link * link * link * revert * removing dev container * cleanups * pin streamlit * changes * cleanup * comments * comments --- .gitignore | 1 + dashboard | 1 + packages/dashboard/.python-version | 1 + packages/dashboard/README.md | 10 + packages/dashboard/atmo_eval.py | 155 ++ packages/dashboard/atmo_training.py | 118 ++ packages/dashboard/dashboard.py | 67 + packages/dashboard/data_overview.py | 91 + packages/dashboard/eng_overview.py | 205 ++ packages/dashboard/pyproject.toml | 90 + packages/dashboard/uv.lock | 
1877 +++++++++++++++++ .../weathergen/dashboard/__init__.py | 0 .../dashboard/weathergen/dashboard/metrics.py | 77 + .../src/weathergen/metrics/mlflow_utils.py | 17 +- pyproject.toml | 2 + 15 files changed, 2705 insertions(+), 7 deletions(-) create mode 120000 dashboard create mode 100644 packages/dashboard/.python-version create mode 100644 packages/dashboard/README.md create mode 100644 packages/dashboard/atmo_eval.py create mode 100644 packages/dashboard/atmo_training.py create mode 100644 packages/dashboard/dashboard.py create mode 100644 packages/dashboard/data_overview.py create mode 100644 packages/dashboard/eng_overview.py create mode 100644 packages/dashboard/pyproject.toml create mode 100644 packages/dashboard/uv.lock create mode 100644 packages/dashboard/weathergen/dashboard/__init__.py create mode 100644 packages/dashboard/weathergen/dashboard/metrics.py diff --git a/.gitignore b/.gitignore index 0854b731d..827542891 100644 --- a/.gitignore +++ b/.gitignore @@ -147,6 +147,7 @@ venv/ ENV/ env.bak/ venv.bak/ +.*cache # Spyder project settings .spyderproject diff --git a/dashboard b/dashboard new file mode 120000 index 000000000..463de28d3 --- /dev/null +++ b/dashboard @@ -0,0 +1 @@ +packages/dashboard \ No newline at end of file diff --git a/packages/dashboard/.python-version b/packages/dashboard/.python-version new file mode 100644 index 000000000..e4fba2183 --- /dev/null +++ b/packages/dashboard/.python-version @@ -0,0 +1 @@ +3.12 diff --git a/packages/dashboard/README.md b/packages/dashboard/README.md new file mode 100644 index 000000000..d5c663f62 --- /dev/null +++ b/packages/dashboard/README.md @@ -0,0 +1,10 @@ +# WeatherGenerator internal dashboard + + +## Deployment + +See full instruction at https://gitlab.jsc.fz-juelich.de/esde/WeatherGenerator-private/-/wikis/home/Tracking-progress + +``` +uv run --env-file=.env streamlit run dashboard.py +``` \ No newline at end of file diff --git a/packages/dashboard/atmo_eval.py b/packages/dashboard/atmo_eval.py new file mode 100644 index 000000000..a98b32268 --- /dev/null +++ b/packages/dashboard/atmo_eval.py @@ -0,0 +1,155 @@ +import logging + +import plotly.express as px +import polars as pl +import streamlit as st +from mlflow.client import MlflowClient + +from weathergen.dashboard.metrics import ST_TTL_SEC, latest_runs, setup_mflow, stage_is_eval + +_logger = logging.getLogger(__name__) + +_logger.info("Setting up MLFlow") +client: MlflowClient = setup_mflow() + +important_scores = [ + ("metrics.score.global.rmse.ERA5.2t", "deg K"), +] + +st.markdown(""" + +# Main model training: evaluation scores + +The evaluation scores logged during the main model training runs. + +""") + + +@st.cache_data(ttl=ST_TTL_SEC, max_entries=2) +def get_runs_with_scores() -> pl.DataFrame: + """ + The runs that have evaluation scores logged. + - Only keep the eval stage runs + - Only keep the metrics.score.* metrics + """ + # Fully encapsulated logic to allow caching + runs = latest_runs() + eval_runs = runs.filter(stage_is_eval) + # Keep all non-metrics columns, plus metrics.score.* columns + # Do not keep gradient metrics or other metrics. 
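+    # For example, "metrics.score.global.rmse.ERA5.2t" is kept, while gradient and
+    # other "metrics.*" diagnostics are dropped; non-metric columns such as
+    # run_id, start_time and tags.* pass through unchanged.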
+ target_cols = [ + col + for col in eval_runs.columns + if (col.startswith("metrics.score.") or not col.startswith("metrics")) + ] + eval_runs = eval_runs.select(target_cols) + return eval_runs + + +eval_runs = get_runs_with_scores() + +# The info columns to show on hover +info_cols = [ + "tags.hpc", + "tags.uploader", + "tags.run_id", +] + + +@st.cache_data(ttl=ST_TTL_SEC, max_entries=20) +def get_score_step_48h(score_col: str) -> pl.DataFrame: + """ + Given a score name, return this score at the step corresponding to 48h. + """ + score = score_col.replace("metrics.", "") + # Caching since it makes multiple MLFlow calls + eval_runs = get_runs_with_scores() + step_48h = 8 # Each step = 6 hours => look for step = 8*6 = 48 hours + score_data = ( + eval_runs.select( + [ + pl.col("run_id"), # The MLFlow run ID + pl.col("start_time"), + pl.col(score_col), + ] + + [pl.col(c) for c in info_cols] + ) + .sort("start_time") + .filter(pl.col(score_col).is_not_null()) + ) + _logger.info(f"Getting score data for {score_col} at 48h (step={step_48h}): len={len(score_data)}") + + # Iterate over the runs to get the metric at step 48h + scores_dt: list[float | None] = [] + for row in score_data.iter_rows(named=True): + mlflow_run_id = row["run_id"] + _logger.info(f"Fetching metric history for run_id={mlflow_run_id}, score={score}") + data = client.get_metric_history( + run_id=mlflow_run_id, + key=score, + ) + # Find the value at step 48h + value_48h: float | None = None + for m in data: + if m.step == step_48h: + value_48h = m.value + break + scores_dt.append(value_48h) + score_data = score_data.with_columns(pl.Series(name="score_48h", values=scores_dt)).filter( + pl.col("score_48h").is_not_null() + ) + return score_data + + +# The specific score of interest: +for score_col, unit in important_scores: + score_data_48h = get_score_step_48h(score_col) + score = score_col.replace("metrics.", "") + + st.markdown(f""" + ## {score} at 48h + The evaluation score at 48 hours into the forecast. Unit: {unit} + """) + tab1, tab2 = st.tabs(["chart", "data"]) + tab1.plotly_chart( + px.scatter( + score_data_48h.to_pandas(), + x="start_time", + y="score_48h", + hover_data=info_cols, + ) + ) + tab2.dataframe(score_data_48h.to_pandas()) + +st.markdown(""" +# All latest evaluation scores + +These scores are harder to compare: different experiments may have different forecast lengths. 
+ +""") + +accepted_scores = sorted([col for col in eval_runs.columns if col.startswith("metrics.score.")]) + +for score_col in accepted_scores: + score_data = ( + eval_runs.select( + [ + pl.col("start_time"), + pl.col(score_col), + ] + + [pl.col(c) for c in info_cols] + ) + .sort("start_time") + .filter(pl.col(score_col).is_not_null()) + ) + if score_data.is_empty(): + continue + st.markdown(f"## {score_col}") + st.plotly_chart( + px.scatter( + score_data.to_pandas(), + x="start_time", + y=score_col, + hover_data=info_cols, + ) + ) diff --git a/packages/dashboard/atmo_training.py b/packages/dashboard/atmo_training.py new file mode 100644 index 000000000..45a27ca5e --- /dev/null +++ b/packages/dashboard/atmo_training.py @@ -0,0 +1,118 @@ +import logging + +import plotly.express as px +import plotly.graph_objects as go +import polars as pl +import streamlit as st +from plotly.subplots import make_subplots + +from weathergen.dashboard.metrics import ( + all_runs, + latest_runs, + setup_mflow, + stage_is_train, + stage_is_val, +) + +_logger = logging.getLogger("atmo_training") + + +logging.basicConfig(level=logging.INFO) +_logger.info("Setting up MLFlow") +setup_mflow() + + +st.markdown(""" + +# Training overview + +Note: num_samples only shows the number of sample per run. +It does not include chained runs or total steps with finetuning included. +""") + +runs = latest_runs() +all_runs_pdf = all_runs() + + +accepted_metrics = ( + [ + f"metrics.stream.{stream}.loss_mse.loss_avg" + for stream in ["ERA5", "SurfaceCombined", "NPPATMS"] + ] + + ["metrics.num_samples"] + + ["metrics.loss_avg_mean"] +) + + +def make_plot(df): + def filter_met(c: str) -> bool: + return c in accepted_metrics + + plot_metrics = sorted([c for c in df.columns if filter_met(c)]) + hovertemplate = "".join( + [ + f"{col}: %{{customdata[{idx}]}}
" + if ("metrics" not in col and "params" not in col and "tags.mlflow" not in col) + else "" + for idx, col in enumerate(df.columns) + ] + ) + hovertemplate = "val: %{y}
" + hovertemplate + num_plots = len(plot_metrics) + fig = make_subplots(rows=num_plots, cols=1, subplot_titles=plot_metrics) + for i, metric in enumerate(plot_metrics): + s = go.Scatter( + x=df["end_time"], + y=df[metric], + mode="markers", + customdata=df, + hovertemplate=hovertemplate, + ) + fig.add_trace(s, row=i + 1, col=1) + + fig.update_yaxes(type="log") + fig.update_layout(height=800, width=1024, showlegend=False) + return fig + + +st.markdown("## Train") + +st.plotly_chart(make_plot(runs.filter(stage_is_train))) + +st.markdown("# Validation") + +st.plotly_chart(make_plot(runs.filter(stage_is_val))) + +st.markdown(""" +# Scaling + +Hypothesis: loss ~ O(num_samples ^ {-alpha}) + + +The deep blue dots are the most recent runs, the light blue are the eldest. +""") + +train_runs = runs.filter(stage_is_train) +min_end_date = train_runs["start_time"].cast(pl.Float64).min() +max_end_date = train_runs["start_time"].cast(pl.Float64).max() +train_runs = train_runs.with_columns( + ( + (pl.col("start_time").cast(pl.Float64) - pl.lit(min_end_date)) + / (pl.lit(max_end_date) - pl.lit(min_end_date)) + ).alias("idx") +) + +_logger.info("Number of training runs: %d", len(train_runs)) + + +st.plotly_chart( + px.scatter( + train_runs.to_pandas(), + x="metrics.num_samples", + y="metrics.loss_avg_mean", + color="idx", + hover_data=["start_time", "tags.hpc", "tags.uploader"], + log_y=True, + log_x=True, + ) +) diff --git a/packages/dashboard/dashboard.py b/packages/dashboard/dashboard.py new file mode 100644 index 000000000..ff1152de9 --- /dev/null +++ b/packages/dashboard/dashboard.py @@ -0,0 +1,67 @@ +import logging +import os + +import streamlit as st +import streamlit_authenticator as stauth + + +@st.cache_resource +def get_logger(): + logger = logging.getLogger() + if not logger.hasHandlers(): + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + level=logging.INFO, + ) + print("_logger being returned", logger) # noqa: T201 + return logger + + +user = os.getenv("USER_NAME") +password = os.getenv("USER_PASSWORD") +auth_time_sec = int(os.getenv("AUTH_TIME_SEC", "1800")) + +authenticator = stauth.Authenticate( + { + "usernames": { + user: { + "email": "noreply@weathergenerator.eu", + "failed_login_attempts": 0, + "logged_in": False, + "first_name": "Test", + "last_name": "Test", + "password": password, + } + } + }, + "authenticator_cookie", + "authenticator_cookie_key", + auth_time_sec, +) + + +try: + authenticator.login() +except Exception as e: + st.error(e) + + +if st.session_state.get("authentication_status"): + pg = st.navigation( + { + "Engineering": [st.Page("eng_overview.py", title="overview")], + "Model:atmo": [ + st.Page("atmo_training.py", title="training"), + st.Page("atmo_eval.py", title="evaluation"), + ], + "Data": [st.Page("data_overview.py", title="overview")], + } + ) + pg.run() + st.sidebar.image("https://upload.wikimedia.org/wikipedia/commons/e/e1/ECMWF_logo.svg") + st.sidebar.markdown("[weathergenerator.eu](https://weathergenerator.eu)") + authenticator.logout() +elif st.session_state.get("authentication_status") is False: + st.error("Username/password is incorrect") +elif st.session_state.get("authentication_status") is None: + st.warning("Please enter your username and password") diff --git a/packages/dashboard/data_overview.py b/packages/dashboard/data_overview.py new file mode 100644 index 000000000..4129fc8f4 --- /dev/null +++ b/packages/dashboard/data_overview.py @@ -0,0 +1,91 @@ +import json +import logging +from pathlib import Path + 
+import plotly.express as px +import polars as pl +import streamlit as st + +_logger = logging.getLogger(__name__) +# List all the json files in ../../stac/jsons: + +# Find the current absolute location of this file +current_file_path = Path(__file__) +_logger.info(f"Current file path: {current_file_path}") +# Get the directory: +current_dir = current_file_path.parent + +stac_dir = (current_dir / "../../stac/jsons").resolve() +_logger.info(f"STAC JSON directory: {stac_dir}") + +json_files = sorted([f for f in stac_dir.iterdir() if f.suffix == ".json"]) + + +stats = [] +for json_file in json_files: + with open(json_file) as f: + data = json.load(f) + d_id = data.get("id") + if "properties" not in data: + continue + name = data["properties"].get("name", "No title") + data_stats = {} + for fname, fprop in data.get("assets", {}).items(): + inodes = int(fprop.get("inodes", "0").replace(".", "").replace(",", "")) + size = str(fprop.get("size", "0")).lower().replace(",", ".") + # Only keep numbers or dots: + size_ = float("".join([c for c in size if c.isdigit() or c == "."])) + if "tb" in size: + size_ *= 1024**4 + elif "gb" in size: + size_ *= 1024**3 + elif "mb" in size: + size_ *= 1024**2 + + locations = list(fprop.get("locations", [])) + data_stats[fname] = {"inodes": inodes, "locations": locations, "size": size_} + for loc in locations: + stats.append( + { + "id": d_id, + "name": name, + "file": fname, + "location": loc, + "inodes": inodes, + "size": size_, + } + ) + + # + # st.write(f"Data from {json_file}:", name, data_stats) + +stats_df = pl.DataFrame(stats) + +st.markdown(""" +# INode counts + +The number of inodes on each HPC. + +This is taken from the STAC catalog JSON files. It may not reflect the current +state of the files on disk. +""") + +st.plotly_chart(px.treemap(stats_df, path=["location", "name"], values="inodes")) + +st.markdown(""" Duplication by HPC """) + +st.plotly_chart(px.treemap(stats_df, path=["name", "location", "file"], values="inodes")) + + +st.markdown(""" +# File sizes + +The size of files on each HPC. +""") + +st.plotly_chart(px.treemap(stats_df, path=["location", "name"], values="size")) + +st.markdown("## Detailed stats") + +st.write("JSON files:", json_files) +st.dataframe(stats_df) diff --git a/packages/dashboard/eng_overview.py b/packages/dashboard/eng_overview.py new file mode 100644 index 000000000..30b577861 --- /dev/null +++ b/packages/dashboard/eng_overview.py @@ -0,0 +1,205 @@ +import logging + +import plotly.express as px +import polars as pl +import polars.selectors as ps +import streamlit as st +from polars import col as C + +from weathergen.dashboard.metrics import all_runs, latest_runs, setup_mflow + +_logger = logging.getLogger("eng_overview") + + +logging.basicConfig(level=logging.INFO) +_logger.info("Setting up MLFlow") +setup_mflow() + + +st.markdown("# Engineering overview") + + +runs = latest_runs() +all_runs_pdf = all_runs() + +st.markdown("""The number of runs by month and by HPC.""") +# TODO: this is here just the number of root run ids. +# Does not count how many tries or how many validation experiments were run.
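+# A root run is one with no MLflow parent: retries and chained stages attach to
+# it via tags.mlflow.parentRunId, so filtering on a null parent id below counts
+# each experiment once.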
+all_runs_stats = ( + all_runs_pdf.sort("start_time") + # Remove metrics and tags + .select(~ps.starts_with("metrics")) + .select(~ps.starts_with("params")) + # Just keep roots + .filter(C("tags.mlflow.parentRunId").is_null()) + # Put a month column + .with_columns(pl.date(C("start_time").dt.year(), C("start_time").dt.month(), 1).alias("month")) +) + + +runs_lifecycle_stats = ( + # Remove metrics and params + all_runs_pdf.select(~ps.starts_with("metrics")) + .select(~ps.starts_with("params")) + .filter(C("tags.run_id").is_not_null()) + # For each of the run_ids, keep status, time, all stages, hpc + .group_by("tags.run_id") + .agg( + C("status").unique(), C("start_time").min(), C("tags.stage").unique(), C("tags.hpc").first() + ) + .with_columns( + # Filter mlflow status: + # FAILED => failed + # FINISHED => finished + # else => running + pl.when(C("status").list.contains("FAILED")) + .then(pl.lit("failed")) + .otherwise( + pl.when(C("status").list.contains("FINISHED")) + .then(pl.lit("finished")) + .otherwise(pl.lit("running")) + ) + .alias("synth_status"), + # Has train/val/eval stages + C("tags.stage").list.contains("train").alias("has_train_stage"), + C("tags.stage").list.contains("val").alias("has_val_stage"), + C("tags.stage").list.contains("eval").alias("has_eval_stage"), + ) + # Put a month column + .with_columns(pl.date(C("start_time").dt.year(), C("start_time").dt.month(), 1).alias("month")) + # cast to str the week column: plotly will misinterpret it otherwise + .with_columns(C("start_time").dt.week().cast(pl.String).alias("week")) +) + + +st.plotly_chart( + px.bar( + (all_runs_stats.group_by("month", "tags.hpc").agg(pl.count("run_id"))).to_pandas(), + x="month", + y="run_id", + color="tags.hpc", + ) +) + + +st.markdown( + """ + +**The number of GPUs by run.** + +(only includes runs for which evaluation data has been uploaded) + +""" +) + +st.plotly_chart( + px.scatter( + all_runs_pdf.filter(pl.col("params.num_ranks").is_not_null()) + .select(["params.num_ranks", "start_time", "tags.hpc"]) + .to_pandas(), + y="params.num_ranks", + x="start_time", + color="tags.hpc", + # hover_data=["start_time", "tags.uploader"], + log_y=True, + ) +) + +st.markdown( + """ + +**Runs by final status** + +Developers using older versions will show running forever. + +""" +) + +_status_colors = {"finished": "green", "failed": "red", "running": "lightblue"} + +st.plotly_chart( + px.bar( + ( + runs_lifecycle_stats.group_by("week", "synth_status", "tags.hpc").agg( + pl.count("tags.run_id") + ) + ).to_pandas(), + x="week", + y="tags.run_id", + color="synth_status", + color_discrete_map=_status_colors, + ) +) + + +st.markdown( + """ + +**Fraction of completed runs uploading training data** + + +""" +) + +_present_colors = {True: "green", False: "lightgray"} + +st.plotly_chart( + px.bar( + ( + runs_lifecycle_stats.filter(pl.col("synth_status") != "running") + .group_by("week", "synth_status", "tags.hpc", "has_train_stage") + .agg(pl.count("tags.run_id")) + ).to_pandas(), + x="week", + y="tags.run_id", + color="has_train_stage", + color_discrete_map=_present_colors, + ) +) + + +st.markdown( + """ + +**Fraction uploading evaluation data** + +Developers using older versions will show running forever. 
+ +""" +) + +st.plotly_chart( + px.bar( + ( + runs_lifecycle_stats.group_by("week", "synth_status", "tags.hpc", "has_eval_stage").agg( + pl.count("tags.run_id") + ) + ).to_pandas(), + x="week", + y="tags.run_id", + color="has_eval_stage", + color_discrete_map=_present_colors, + ) +) + +all_metrics = sorted(all_runs_pdf.select(ps.starts_with("metrics.")).columns) + +st.markdown( + f""" + +**List of MLFlow metrics by number of runs** + +There is a hard limit of 1000 metrics per run in MLFlow. + + +Total number of metrics tracked: {len(all_metrics)}. +""" +) + +st.dataframe( + all_runs_pdf.select(ps.starts_with("metrics.")) + .select([pl.count(c) for c in all_metrics]) + .transpose(include_header=True) + .sort(by="column_0", descending=True) + .to_pandas() +) diff --git a/packages/dashboard/pyproject.toml b/packages/dashboard/pyproject.toml new file mode 100644 index 000000000..b199377ee --- /dev/null +++ b/packages/dashboard/pyproject.toml @@ -0,0 +1,90 @@ +# Very simple project for the dashboard + +[project] +name = "weathergen-dashboard" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +requires-python = ">=3.12, <3.13" +dependencies = [ + "boto3<1.36", + "mlflow~=3.3.2", +# "pdbpp>=0.11.7", + "plotly~=6.1.2", + "polars~=1.30.0", + "requests~=2.32.4", + "streamlit~=1.46.0", + "streamlit-authenticator>=0.4.2", + "watchdog", + "weathergen-common", + "weathergen-metrics", +] + +[dependency-groups] +dev = [ + "pytest~=8.3.5", + "pytest-mock>=3.14.1", + "ruff==0.9.7", + "pyrefly==0.36.0", +] + +[tool.uv.sources] +weathergen-common = { path = "../common", editable = true } +weathergen-metrics = { path = "../metrics", editable = true } + + + +# The linting configuration +[tool.ruff] + +# Wide rows +line-length = 100 + +[tool.ruff.lint] +# All disabled until the code is formatted. +select = [ + # pycodestyle + "E", + # Pyflakes + "F", + # pyupgrade + "UP", + # flake8-bugbear + "B", + # flake8-simplify + "SIM", + # isort + "I", + # Banned imports + "TID", + # Naming conventions + "N", + # print + "T201" +] + +# These rules are sensible and should be enabled at a later stage. +ignore = [ + # "B006", + "B011", + "UP008", + "SIM117", + "SIM118", + "SIM102", + "SIM401", + # To ignore, not relevant for us + "SIM108", # in case additional norm layer supports are added in future + "N817", # we use heavy acronyms, e.g., allowing 'import LongModuleName as LMN' (LMN is accepted) + "E731", # overly restrictive and less readable code + "N812", # prevents us following the convention for importing torch.nn.functional as F +] + +[tool.ruff.lint.flake8-tidy-imports.banned-api] +"numpy.ndarray".msg = "Do not use 'ndarray' to describe a numpy array type, it is a function. 
Use numpy.typing.NDArray or numpy.typing.NDArray[np.float32] for example" + +[tool.ruff.format] +# Use Unix `\n` line endings for all files +line-ending = "lf" + + + diff --git a/packages/dashboard/uv.lock b/packages/dashboard/uv.lock new file mode 100644 index 000000000..2337be667 --- /dev/null +++ b/packages/dashboard/uv.lock @@ -0,0 +1,1877 @@ +version = 1 +revision = 3 +requires-python = "==3.12.*" + +[[package]] +name = "alembic" +version = "1.16.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "mako" }, + { name = "sqlalchemy" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9a/ca/4dc52902cf3491892d464f5265a81e9dff094692c8a049a3ed6a05fe7ee8/alembic-1.16.5.tar.gz", hash = "sha256:a88bb7f6e513bd4301ecf4c7f2206fe93f9913f9b48dac3b78babde2d6fe765e", size = 1969868, upload-time = "2025-08-27T18:02:05.668Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/39/4a/4c61d4c84cfd9befb6fa08a702535b27b21fff08c946bc2f6139decbf7f7/alembic-1.16.5-py3-none-any.whl", hash = "sha256:e845dfe090c5ffa7b92593ae6687c5cb1a101e91fa53868497dbd79847f9dbe3", size = 247355, upload-time = "2025-08-27T18:02:07.37Z" }, +] + +[[package]] +name = "altair" +version = "5.5.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jinja2" }, + { name = "jsonschema" }, + { name = "narwhals" }, + { name = "packaging" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/16/b1/f2969c7bdb8ad8bbdda031687defdce2c19afba2aa2c8e1d2a17f78376d8/altair-5.5.0.tar.gz", hash = "sha256:d960ebe6178c56de3855a68c47b516be38640b73fb3b5111c2a9ca90546dd73d", size = 705305, upload-time = "2024-11-23T23:39:58.542Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/aa/f3/0b6ced594e51cc95d8c1fc1640d3623770d01e4969d29c0bd09945fafefa/altair-5.5.0-py3-none-any.whl", hash = "sha256:91a310b926508d560fe0148d02a194f38b824122641ef528113d029fcd129f8c", size = 731200, upload-time = "2024-11-23T23:39:56.4Z" }, +] + +[[package]] +name = "annotated-types" +version = "0.7.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" }, +] + +[[package]] +name = "antlr4-python3-runtime" +version = "4.9.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/3e/38/7859ff46355f76f8d19459005ca000b6e7012f2f1ca597746cbcd1fbfe5e/antlr4-python3-runtime-4.9.3.tar.gz", hash = "sha256:f224469b4168294902bb1efa80a8bf7855f24c99aef99cbefc1bcd3cce77881b", size = 117034, upload-time = "2021-11-06T17:52:23.524Z" } + +[[package]] +name = "anyio" +version = "4.10.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "idna" }, + { name = "sniffio" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f1/b4/636b3b65173d3ce9a38ef5f0522789614e590dab6a8d505340a4efe4c567/anyio-4.10.0.tar.gz", hash = 
"sha256:3f3fae35c96039744587aa5b8371e7e8e603c0702999535961dd336026973ba6", size = 213252, upload-time = "2025-08-04T08:54:26.451Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213, upload-time = "2025-08-04T08:54:24.882Z" }, +] + +[[package]] +name = "asciitree" +version = "0.3.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/6a/885bc91484e1aa8f618f6f0228d76d0e67000b0fdd6090673b777e311913/asciitree-0.3.3.tar.gz", hash = "sha256:4aa4b9b649f85e3fcb343363d97564aa1fb62e249677f2e18a96765145cc0f6e", size = 3951, upload-time = "2016-09-05T19:10:42.681Z" } + +[[package]] +name = "astropy" +version = "7.1.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astropy-iers-data" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pyerfa" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e1/6b8846dabc54b6fdc0262adefa041259f52ece9c929b10e0e90937691345/astropy-7.1.1.tar.gz", hash = "sha256:6d128f0005e2c34f70113484468bf9d0e4ca1ee15a279cfd08bdd979d38db0f8", size = 6982773, upload-time = "2025-10-10T20:36:49.347Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/15/d5/11031eb9788d35826ef527260cf17d5d6ebe8995ba8d67484c236644ce1e/astropy-7.1.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:268c6bdfb1c4feef8461070bbfdd6b8c07a9badf91977623168d7d3f0ddaf70c", size = 6403108, upload-time = "2025-10-10T20:36:12.958Z" }, + { url = "https://files.pythonhosted.org/packages/01/ca/57d8eeb6f8f67fb3063d6be1e043920f4f25f8e261042fa47a4ff5764c74/astropy-7.1.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:48d10b23e5a53afccd3e092d0c78792f8c644197ece4a7d95d83d7e491768d4c", size = 6349935, upload-time = "2025-10-10T20:36:14.67Z" }, + { url = "https://files.pythonhosted.org/packages/9b/d0/d9d33e9cdc10010e3e2f1e30cb9748a77c3a5ca69d4f4fed82d03bcafd79/astropy-7.1.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f04b4bdef1990e0b4a5cbdd7871ff172e291b9d4ac27a411e240727e4a357616", size = 10233406, upload-time = "2025-10-10T20:36:16.296Z" }, + { url = "https://files.pythonhosted.org/packages/93/9b/14fb6cf65bd18015c6fdab7c71e4ae00318dd325d0ed03441ff2bb2b913a/astropy-7.1.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:31233556969351c68e104700dbb81e86b02447beeea70028d2d04cd9c2fedd6f", size = 10290578, upload-time = "2025-10-10T20:36:18.595Z" }, + { url = "https://files.pythonhosted.org/packages/c5/1b/994b207601d062f31e67cabbc5827e42b8472ce926ed865f06dba8648429/astropy-7.1.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:70c69c9a8585d51658837a5643ed9cb4d386f343e2097bce38aba68fbdd48a7f", size = 10228861, upload-time = "2025-10-10T20:36:20.56Z" }, + { url = "https://files.pythonhosted.org/packages/2d/bd/84845404ec729f6e54a94d3f150e5d6c8808dae232f5e12262a72a428c95/astropy-7.1.1-cp312-cp312-win32.whl", hash = "sha256:452be62a2b9f68207f949089937574057415937e5273c37bdaafab0835c21259", size = 6157841, upload-time = "2025-10-10T20:36:22.313Z" }, + { url = "https://files.pythonhosted.org/packages/c1/83/80eca357b28d827f58c688b6c4e3ead88b577af55211676f3e1c13c5bfbd/astropy-7.1.1-cp312-cp312-win_amd64.whl", hash = 
"sha256:85595f9dce107901ccd3bf493899c4b08a0925abb75d32c72325c4aba5369bd2", size = 6287394, upload-time = "2025-10-10T20:36:24.103Z" }, +] + +[[package]] +name = "astropy-healpix" +version = "1.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "astropy" }, + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5c/8a/dfd71e4db13706383ca2cea0b2831eb10da2c286d4494f183f80fc03cbc2/astropy_healpix-1.1.2.tar.gz", hash = "sha256:03671df12a36ec3b357c244d5154b6786362ff5d80770675c7b24815101066e4", size = 109453, upload-time = "2025-02-19T19:38:06.814Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/35/be/0caad18d7077c2547c1225bfff3edb691e03495b8952541d000cf40234b7/astropy_healpix-1.1.2-cp310-abi3-macosx_10_9_x86_64.whl", hash = "sha256:fb504c998e1661215c74da9537558cd2048d29b44acb2d63e613aae133b91668", size = 85424, upload-time = "2025-02-19T19:37:53.808Z" }, + { url = "https://files.pythonhosted.org/packages/8d/38/eb2897df8b0cba6d8dd0d08571fa7f2277002a46feb4c97fa121f3878c30/astropy_healpix-1.1.2-cp310-abi3-macosx_11_0_arm64.whl", hash = "sha256:00a0c9378d7e844aecb23d62c206a999e045a48781a320ac5f012f8c95ac4022", size = 81721, upload-time = "2025-02-19T19:37:55.72Z" }, + { url = "https://files.pythonhosted.org/packages/d9/40/0382c85a995008ba8e2f99e28d143cfe9f0a835e3a2088c36a4947c93420/astropy_healpix-1.1.2-cp310-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee37b14700d28cf53e2376c65f8cb6224a59f80067feb3f3cd6dd6f9a4577337", size = 191974, upload-time = "2025-02-19T19:37:56.791Z" }, + { url = "https://files.pythonhosted.org/packages/31/01/b5d91f29f36ab9fb220ef3e820dd3046f9f671fc1ec99644369dc606840b/astropy_healpix-1.1.2-cp310-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf7a616af0b7df9c1d96f6af1e12382f29bd43e3fb88ce98f46992bfa23a149e", size = 195201, upload-time = "2025-02-19T19:38:00.034Z" }, + { url = "https://files.pythonhosted.org/packages/39/51/54770150df54c09b06c00f24481317abcb4b6478a99772a5df399894306d/astropy_healpix-1.1.2-cp310-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3526008fc5ccd4c13f3166a878bfb856b909a00912b27d26666992615c668e88", size = 191335, upload-time = "2025-02-19T19:38:02.038Z" }, + { url = "https://files.pythonhosted.org/packages/da/5b/6bafcee285e9ef653449742e6d0ba6974e97cbffccac99727d8606e610b6/astropy_healpix-1.1.2-cp310-abi3-win32.whl", hash = "sha256:94f4a2fcee2e66ab68f8face8d20be4553cbf6ce81bd214052ddf307e2118513", size = 52693, upload-time = "2025-02-19T19:38:04.662Z" }, + { url = "https://files.pythonhosted.org/packages/01/6d/07a6dcd87aec162b9cb63167ccb3919a4f6ee739ce32035cd44887ae8708/astropy_healpix-1.1.2-cp310-abi3-win_amd64.whl", hash = "sha256:f6b3e50c49e73a66bb1847dc3451e1d22bf828c10881275bf359928e95d25fe3", size = 54830, upload-time = "2025-02-19T19:38:05.709Z" }, +] + +[[package]] +name = "astropy-iers-data" +version = "0.2025.11.10.0.38.31" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a6/56/bbcef87b43e7ce4a23555d17aec0ff6d19a0a1880ff7e21099ee022ef004/astropy_iers_data-0.2025.11.10.0.38.31.tar.gz", hash = "sha256:487dc974b9a5114ac75ff4fd96244e8fa7d07e1504092f7c2b8138c2b9c842e9", size = 1913161, upload-time = "2025-11-10T00:39:14.899Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d3/9d/51b958a9f8474f04462923550641ccbef9a50566a63c87d691bec443a7d0/astropy_iers_data-0.2025.11.10.0.38.31-py3-none-any.whl", hash = 
"sha256:620155a3f04d7c96e7f4aaa498dc4571e15449f23963574106d29d8959d4c66f", size = 1969542, upload-time = "2025-11-10T00:39:12.824Z" }, +] + +[[package]] +name = "attrs" +version = "25.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5a/b0/1367933a8532ee6ff8d63537de4f1177af4bff9f3e829baf7331f595bb24/attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b", size = 812032, upload-time = "2025-03-13T11:10:22.779Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/06/bb80f5f86020c4551da315d78b3ab75e8228f89f0162f2c3a819e407941a/attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3", size = 63815, upload-time = "2025-03-13T11:10:21.14Z" }, +] + +[[package]] +name = "bcrypt" +version = "4.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/5d/6d7433e0f3cd46ce0b43cd65e1db465ea024dbb8216fb2404e919c2ad77b/bcrypt-4.3.0.tar.gz", hash = "sha256:3a3fd2204178b6d2adcf09cb4f6426ffef54762577a7c9b54c159008cb288c18", size = 25697, upload-time = "2025-02-28T01:24:09.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/11/22/5ada0b9af72b60cbc4c9a399fdde4af0feaa609d27eb0adc61607997a3fa/bcrypt-4.3.0-cp38-abi3-macosx_10_12_universal2.whl", hash = "sha256:f81b0ed2639568bf14749112298f9e4e2b28853dab50a8b357e31798686a036d", size = 498019, upload-time = "2025-02-28T01:23:05.838Z" }, + { url = "https://files.pythonhosted.org/packages/b8/8c/252a1edc598dc1ce57905be173328eda073083826955ee3c97c7ff5ba584/bcrypt-4.3.0-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:864f8f19adbe13b7de11ba15d85d4a428c7e2f344bac110f667676a0ff84924b", size = 279174, upload-time = "2025-02-28T01:23:07.274Z" }, + { url = "https://files.pythonhosted.org/packages/29/5b/4547d5c49b85f0337c13929f2ccbe08b7283069eea3550a457914fc078aa/bcrypt-4.3.0-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3e36506d001e93bffe59754397572f21bb5dc7c83f54454c990c74a468cd589e", size = 283870, upload-time = "2025-02-28T01:23:09.151Z" }, + { url = "https://files.pythonhosted.org/packages/be/21/7dbaf3fa1745cb63f776bb046e481fbababd7d344c5324eab47f5ca92dd2/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:842d08d75d9fe9fb94b18b071090220697f9f184d4547179b60734846461ed59", size = 279601, upload-time = "2025-02-28T01:23:11.461Z" }, + { url = "https://files.pythonhosted.org/packages/6d/64/e042fc8262e971347d9230d9abbe70d68b0a549acd8611c83cebd3eaec67/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7c03296b85cb87db865d91da79bf63d5609284fc0cab9472fdd8367bbd830753", size = 297660, upload-time = "2025-02-28T01:23:12.989Z" }, + { url = "https://files.pythonhosted.org/packages/50/b8/6294eb84a3fef3b67c69b4470fcdd5326676806bf2519cda79331ab3c3a9/bcrypt-4.3.0-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:62f26585e8b219cdc909b6a0069efc5e4267e25d4a3770a364ac58024f62a761", size = 284083, upload-time = "2025-02-28T01:23:14.5Z" }, + { url = "https://files.pythonhosted.org/packages/62/e6/baff635a4f2c42e8788fe1b1633911c38551ecca9a749d1052d296329da6/bcrypt-4.3.0-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:beeefe437218a65322fbd0069eb437e7c98137e08f22c4660ac2dc795c31f8bb", size = 279237, upload-time = "2025-02-28T01:23:16.686Z" }, + { url = 
"https://files.pythonhosted.org/packages/39/48/46f623f1b0c7dc2e5de0b8af5e6f5ac4cc26408ac33f3d424e5ad8da4a90/bcrypt-4.3.0-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:97eea7408db3a5bcce4a55d13245ab3fa566e23b4c67cd227062bb49e26c585d", size = 283737, upload-time = "2025-02-28T01:23:18.897Z" }, + { url = "https://files.pythonhosted.org/packages/49/8b/70671c3ce9c0fca4a6cc3cc6ccbaa7e948875a2e62cbd146e04a4011899c/bcrypt-4.3.0-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:191354ebfe305e84f344c5964c7cd5f924a3bfc5d405c75ad07f232b6dffb49f", size = 312741, upload-time = "2025-02-28T01:23:21.041Z" }, + { url = "https://files.pythonhosted.org/packages/27/fb/910d3a1caa2d249b6040a5caf9f9866c52114d51523ac2fb47578a27faee/bcrypt-4.3.0-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:41261d64150858eeb5ff43c753c4b216991e0ae16614a308a15d909503617732", size = 316472, upload-time = "2025-02-28T01:23:23.183Z" }, + { url = "https://files.pythonhosted.org/packages/dc/cf/7cf3a05b66ce466cfb575dbbda39718d45a609daa78500f57fa9f36fa3c0/bcrypt-4.3.0-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:33752b1ba962ee793fa2b6321404bf20011fe45b9afd2a842139de3011898fef", size = 343606, upload-time = "2025-02-28T01:23:25.361Z" }, + { url = "https://files.pythonhosted.org/packages/e3/b8/e970ecc6d7e355c0d892b7f733480f4aa8509f99b33e71550242cf0b7e63/bcrypt-4.3.0-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:50e6e80a4bfd23a25f5c05b90167c19030cf9f87930f7cb2eacb99f45d1c3304", size = 362867, upload-time = "2025-02-28T01:23:26.875Z" }, + { url = "https://files.pythonhosted.org/packages/a9/97/8d3118efd8354c555a3422d544163f40d9f236be5b96c714086463f11699/bcrypt-4.3.0-cp38-abi3-win32.whl", hash = "sha256:67a561c4d9fb9465ec866177e7aebcad08fe23aaf6fbd692a6fab69088abfc51", size = 160589, upload-time = "2025-02-28T01:23:28.381Z" }, + { url = "https://files.pythonhosted.org/packages/29/07/416f0b99f7f3997c69815365babbc2e8754181a4b1899d921b3c7d5b6f12/bcrypt-4.3.0-cp38-abi3-win_amd64.whl", hash = "sha256:584027857bc2843772114717a7490a37f68da563b3620f78a849bcb54dc11e62", size = 152794, upload-time = "2025-02-28T01:23:30.187Z" }, + { url = "https://files.pythonhosted.org/packages/6e/c1/3fa0e9e4e0bfd3fd77eb8b52ec198fd6e1fd7e9402052e43f23483f956dd/bcrypt-4.3.0-cp39-abi3-macosx_10_12_universal2.whl", hash = "sha256:0d3efb1157edebfd9128e4e46e2ac1a64e0c1fe46fb023158a407c7892b0f8c3", size = 498969, upload-time = "2025-02-28T01:23:31.945Z" }, + { url = "https://files.pythonhosted.org/packages/ce/d4/755ce19b6743394787fbd7dff6bf271b27ee9b5912a97242e3caf125885b/bcrypt-4.3.0-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:08bacc884fd302b611226c01014eca277d48f0a05187666bca23aac0dad6fe24", size = 279158, upload-time = "2025-02-28T01:23:34.161Z" }, + { url = "https://files.pythonhosted.org/packages/9b/5d/805ef1a749c965c46b28285dfb5cd272a7ed9fa971f970435a5133250182/bcrypt-4.3.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6746e6fec103fcd509b96bacdfdaa2fbde9a553245dbada284435173a6f1aef", size = 284285, upload-time = "2025-02-28T01:23:35.765Z" }, + { url = "https://files.pythonhosted.org/packages/ab/2b/698580547a4a4988e415721b71eb45e80c879f0fb04a62da131f45987b96/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:afe327968aaf13fc143a56a3360cb27d4ad0345e34da12c7290f1b00b8fe9a8b", size = 279583, upload-time = "2025-02-28T01:23:38.021Z" }, + { url = 
"https://files.pythonhosted.org/packages/f2/87/62e1e426418204db520f955ffd06f1efd389feca893dad7095bf35612eec/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d9af79d322e735b1fc33404b5765108ae0ff232d4b54666d46730f8ac1a43676", size = 297896, upload-time = "2025-02-28T01:23:39.575Z" }, + { url = "https://files.pythonhosted.org/packages/cb/c6/8fedca4c2ada1b6e889c52d2943b2f968d3427e5d65f595620ec4c06fa2f/bcrypt-4.3.0-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f1e3ffa1365e8702dc48c8b360fef8d7afeca482809c5e45e653af82ccd088c1", size = 284492, upload-time = "2025-02-28T01:23:40.901Z" }, + { url = "https://files.pythonhosted.org/packages/4d/4d/c43332dcaaddb7710a8ff5269fcccba97ed3c85987ddaa808db084267b9a/bcrypt-4.3.0-cp39-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3004df1b323d10021fda07a813fd33e0fd57bef0e9a480bb143877f6cba996fe", size = 279213, upload-time = "2025-02-28T01:23:42.653Z" }, + { url = "https://files.pythonhosted.org/packages/dc/7f/1e36379e169a7df3a14a1c160a49b7b918600a6008de43ff20d479e6f4b5/bcrypt-4.3.0-cp39-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:531457e5c839d8caea9b589a1bcfe3756b0547d7814e9ce3d437f17da75c32b0", size = 284162, upload-time = "2025-02-28T01:23:43.964Z" }, + { url = "https://files.pythonhosted.org/packages/1c/0a/644b2731194b0d7646f3210dc4d80c7fee3ecb3a1f791a6e0ae6bb8684e3/bcrypt-4.3.0-cp39-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:17a854d9a7a476a89dcef6c8bd119ad23e0f82557afbd2c442777a16408e614f", size = 312856, upload-time = "2025-02-28T01:23:46.011Z" }, + { url = "https://files.pythonhosted.org/packages/dc/62/2a871837c0bb6ab0c9a88bf54de0fc021a6a08832d4ea313ed92a669d437/bcrypt-4.3.0-cp39-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6fb1fd3ab08c0cbc6826a2e0447610c6f09e983a281b919ed721ad32236b8b23", size = 316726, upload-time = "2025-02-28T01:23:47.575Z" }, + { url = "https://files.pythonhosted.org/packages/0c/a1/9898ea3faac0b156d457fd73a3cb9c2855c6fd063e44b8522925cdd8ce46/bcrypt-4.3.0-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e965a9c1e9a393b8005031ff52583cedc15b7884fce7deb8b0346388837d6cfe", size = 343664, upload-time = "2025-02-28T01:23:49.059Z" }, + { url = "https://files.pythonhosted.org/packages/40/f2/71b4ed65ce38982ecdda0ff20c3ad1b15e71949c78b2c053df53629ce940/bcrypt-4.3.0-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:79e70b8342a33b52b55d93b3a59223a844962bef479f6a0ea318ebbcadf71505", size = 363128, upload-time = "2025-02-28T01:23:50.399Z" }, + { url = "https://files.pythonhosted.org/packages/11/99/12f6a58eca6dea4be992d6c681b7ec9410a1d9f5cf368c61437e31daa879/bcrypt-4.3.0-cp39-abi3-win32.whl", hash = "sha256:b4d4e57f0a63fd0b358eb765063ff661328f69a04494427265950c71b992a39a", size = 160598, upload-time = "2025-02-28T01:23:51.775Z" }, + { url = "https://files.pythonhosted.org/packages/a9/cf/45fb5261ece3e6b9817d3d82b2f343a505fd58674a92577923bc500bd1aa/bcrypt-4.3.0-cp39-abi3-win_amd64.whl", hash = "sha256:e53e074b120f2877a35cc6c736b8eb161377caae8925c17688bd46ba56daaa5b", size = 152799, upload-time = "2025-02-28T01:23:53.139Z" }, +] + +[[package]] +name = "blinker" +version = "1.9.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/21/28/9b3f50ce0e048515135495f198351908d99540d69bfdc8c1d15b73dc55ce/blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf", size = 22460, upload-time = "2024-11-08T17:25:47.436Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/10/cb/f2ad4230dc2eb1a74edf38f1a38b9b52277f75bef262d8908e60d957e13c/blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc", size = 8458, upload-time = "2024-11-08T17:25:46.184Z" }, +] + +[[package]] +name = "boto3" +version = "1.35.99" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, + { name = "jmespath" }, + { name = "s3transfer" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f7/99/3e8b48f15580672eda20f33439fc1622bd611f6238b6d05407320e1fb98c/boto3-1.35.99.tar.gz", hash = "sha256:e0abd794a7a591d90558e92e29a9f8837d25ece8e3c120e530526fe27eba5fca", size = 111028, upload-time = "2025-01-14T20:20:28.636Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/65/77/8bbca82f70b062181cf0ae53fd43f1ac6556f3078884bfef9da2269c06a3/boto3-1.35.99-py3-none-any.whl", hash = "sha256:83e560faaec38a956dfb3d62e05e1703ee50432b45b788c09e25107c5058bd71", size = 139178, upload-time = "2025-01-14T20:20:25.48Z" }, +] + +[[package]] +name = "botocore" +version = "1.35.99" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jmespath" }, + { name = "python-dateutil" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7c/9c/1df6deceee17c88f7170bad8325aa91452529d683486273928eecfd946d8/botocore-1.35.99.tar.gz", hash = "sha256:1eab44e969c39c5f3d9a3104a0836c24715579a455f12b3979a31d7cde51b3c3", size = 13490969, upload-time = "2025-01-14T20:20:11.419Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fc/dd/d87e2a145fad9e08d0ec6edcf9d71f838ccc7acdd919acc4c0d4a93515f8/botocore-1.35.99-py3-none-any.whl", hash = "sha256:b22d27b6b617fc2d7342090d6129000af2efd20174215948c0d7ae2da0fab445", size = 13293216, upload-time = "2025-01-14T20:20:06.427Z" }, +] + +[[package]] +name = "cachetools" +version = "5.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/6c/81/3747dad6b14fa2cf53fcf10548cf5aea6913e96fab41a3c198676f8948a5/cachetools-5.5.2.tar.gz", hash = "sha256:1a661caa9175d26759571b2e19580f9d6393969e5dfca11fdb1f947a23e640d4", size = 28380, upload-time = "2025-02-20T21:01:19.524Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/72/76/20fa66124dbe6be5cafeb312ece67de6b61dd91a0247d1ea13db4ebb33c2/cachetools-5.5.2-py3-none-any.whl", hash = "sha256:d26a22bcc62eb95c3beabd9f1ee5e820d3d2704fe2967cbe350e20c8ffcd3f0a", size = 10080, upload-time = "2025-02-20T21:01:16.647Z" }, +] + +[[package]] +name = "captcha" +version = "0.7.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pillow" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/b4/65/8e186bb798f33ba390eab897c995b0fcee92bc030e0f40cb8ea01f34dd07/captcha-0.7.1.tar.gz", hash = "sha256:a1b462bcc633a64d8db5efa7754548a877c698d98f87716c620a707364cabd6b", size = 226561, upload-time = "2025-03-01T05:00:13.395Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/da/ff/3f0982ecd37c2d6a7266c22e7ea2e47d0773fe449984184c5316459d2776/captcha-0.7.1-py3-none-any.whl", hash = "sha256:8b73b5aba841ad1e5bdb856205bf5f09560b728ee890eb9dae42901219c8c599", size = 147606, upload-time = "2025-03-01T05:00:10.433Z" }, +] + +[[package]] +name = "certifi" +version = "2025.6.15" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/73/f7/f14b46d4bcd21092d7d3ccef689615220d8a08fb25e564b65d20738e672e/certifi-2025.6.15.tar.gz", hash = "sha256:d747aa5a8b9bbbb1bb8c22bb13e22bd1f18e9796defa16bab421f7f7a317323b", size = 158753, upload-time = "2025-06-15T02:45:51.329Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/ae/320161bd181fc06471eed047ecce67b693fd7515b16d495d8932db763426/certifi-2025.6.15-py3-none-any.whl", hash = "sha256:2e0c7ce7cb5d8f8634ca55d2ba7e6ec2689a2fd6537d8dec1296a477a4910057", size = 157650, upload-time = "2025-06-15T02:45:49.977Z" }, +] + +[[package]] +name = "cffi" +version = "1.17.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pycparser" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/fc/97/c783634659c2920c3fc70419e3af40972dbaf758daa229a7d6ea6135c90d/cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824", size = 516621, upload-time = "2024-09-04T20:45:21.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/84/e94227139ee5fb4d600a7a4927f322e1d4aea6fdc50bd3fca8493caba23f/cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4", size = 183178, upload-time = "2024-09-04T20:44:12.232Z" }, + { url = "https://files.pythonhosted.org/packages/da/ee/fb72c2b48656111c4ef27f0f91da355e130a923473bf5ee75c5643d00cca/cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c", size = 178840, upload-time = "2024-09-04T20:44:13.739Z" }, + { url = "https://files.pythonhosted.org/packages/cc/b6/db007700f67d151abadf508cbfd6a1884f57eab90b1bb985c4c8c02b0f28/cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36", size = 454803, upload-time = "2024-09-04T20:44:15.231Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/f8d151540d8c200eb1c6fba8cd0dfd40904f1b0682ea705c36e6c2e97ab3/cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5", size = 478850, upload-time = "2024-09-04T20:44:17.188Z" }, + { url = "https://files.pythonhosted.org/packages/28/c0/b31116332a547fd2677ae5b78a2ef662dfc8023d67f41b2a83f7c2aa78b1/cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff", size = 485729, upload-time = "2024-09-04T20:44:18.688Z" }, + { url = "https://files.pythonhosted.org/packages/91/2b/9a1ddfa5c7f13cab007a2c9cc295b70fbbda7cb10a286aa6810338e60ea1/cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99", size = 471256, upload-time = "2024-09-04T20:44:20.248Z" }, + { url = "https://files.pythonhosted.org/packages/b2/d5/da47df7004cb17e4955df6a43d14b3b4ae77737dff8bf7f8f333196717bf/cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93", size = 479424, upload-time = "2024-09-04T20:44:21.673Z" }, + { url = "https://files.pythonhosted.org/packages/0b/ac/2a28bcf513e93a219c8a4e8e125534f4f6db03e3179ba1c45e949b76212c/cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = 
"sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3", size = 484568, upload-time = "2024-09-04T20:44:23.245Z" }, + { url = "https://files.pythonhosted.org/packages/d4/38/ca8a4f639065f14ae0f1d9751e70447a261f1a30fa7547a828ae08142465/cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8", size = 488736, upload-time = "2024-09-04T20:44:24.757Z" }, + { url = "https://files.pythonhosted.org/packages/86/c5/28b2d6f799ec0bdecf44dced2ec5ed43e0eb63097b0f58c293583b406582/cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65", size = 172448, upload-time = "2024-09-04T20:44:26.208Z" }, + { url = "https://files.pythonhosted.org/packages/50/b9/db34c4755a7bd1cb2d1603ac3863f22bcecbd1ba29e5ee841a4bc510b294/cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903", size = 181976, upload-time = "2024-09-04T20:44:27.578Z" }, +] + +[[package]] +name = "charset-normalizer" +version = "3.4.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e4/33/89c2ced2b67d1c2a61c19c6751aa8902d46ce3dacb23600a283619f5a12d/charset_normalizer-3.4.2.tar.gz", hash = "sha256:5baececa9ecba31eff645232d59845c07aa030f0c81ee70184a90d35099a0e63", size = 126367, upload-time = "2025-05-02T08:34:42.01Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d7/a4/37f4d6035c89cac7930395a35cc0f1b872e652eaafb76a6075943754f095/charset_normalizer-3.4.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0c29de6a1a95f24b9a1aa7aefd27d2487263f00dfd55a77719b530788f75cff7", size = 199936, upload-time = "2025-05-02T08:32:33.712Z" }, + { url = "https://files.pythonhosted.org/packages/ee/8a/1a5e33b73e0d9287274f899d967907cd0bf9c343e651755d9307e0dbf2b3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cddf7bd982eaa998934a91f69d182aec997c6c468898efe6679af88283b498d3", size = 143790, upload-time = "2025-05-02T08:32:35.768Z" }, + { url = "https://files.pythonhosted.org/packages/66/52/59521f1d8e6ab1482164fa21409c5ef44da3e9f653c13ba71becdd98dec3/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcbe676a55d7445b22c10967bceaaf0ee69407fbe0ece4d032b6eb8d4565982a", size = 153924, upload-time = "2025-05-02T08:32:37.284Z" }, + { url = "https://files.pythonhosted.org/packages/86/2d/fb55fdf41964ec782febbf33cb64be480a6b8f16ded2dbe8db27a405c09f/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d41c4d287cfc69060fa91cae9683eacffad989f1a10811995fa309df656ec214", size = 146626, upload-time = "2025-05-02T08:32:38.803Z" }, + { url = "https://files.pythonhosted.org/packages/8c/73/6ede2ec59bce19b3edf4209d70004253ec5f4e319f9a2e3f2f15601ed5f7/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e594135de17ab3866138f496755f302b72157d115086d100c3f19370839dd3a", size = 148567, upload-time = "2025-05-02T08:32:40.251Z" }, + { url = "https://files.pythonhosted.org/packages/09/14/957d03c6dc343c04904530b6bef4e5efae5ec7d7990a7cbb868e4595ee30/charset_normalizer-3.4.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cf713fe9a71ef6fd5adf7a79670135081cd4431c2943864757f0fa3a65b1fafd", size = 150957, upload-time = "2025-05-02T08:32:41.705Z" }, + { 
url = "https://files.pythonhosted.org/packages/0d/c8/8174d0e5c10ccebdcb1b53cc959591c4c722a3ad92461a273e86b9f5a302/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a370b3e078e418187da8c3674eddb9d983ec09445c99a3a263c2011993522981", size = 145408, upload-time = "2025-05-02T08:32:43.709Z" }, + { url = "https://files.pythonhosted.org/packages/58/aa/8904b84bc8084ac19dc52feb4f5952c6df03ffb460a887b42615ee1382e8/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:a955b438e62efdf7e0b7b52a64dc5c3396e2634baa62471768a64bc2adb73d5c", size = 153399, upload-time = "2025-05-02T08:32:46.197Z" }, + { url = "https://files.pythonhosted.org/packages/c2/26/89ee1f0e264d201cb65cf054aca6038c03b1a0c6b4ae998070392a3ce605/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:7222ffd5e4de8e57e03ce2cef95a4c43c98fcb72ad86909abdfc2c17d227fc1b", size = 156815, upload-time = "2025-05-02T08:32:48.105Z" }, + { url = "https://files.pythonhosted.org/packages/fd/07/68e95b4b345bad3dbbd3a8681737b4338ff2c9df29856a6d6d23ac4c73cb/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:bee093bf902e1d8fc0ac143c88902c3dfc8941f7ea1d6a8dd2bcb786d33db03d", size = 154537, upload-time = "2025-05-02T08:32:49.719Z" }, + { url = "https://files.pythonhosted.org/packages/77/1a/5eefc0ce04affb98af07bc05f3bac9094513c0e23b0562d64af46a06aae4/charset_normalizer-3.4.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:dedb8adb91d11846ee08bec4c8236c8549ac721c245678282dcb06b221aab59f", size = 149565, upload-time = "2025-05-02T08:32:51.404Z" }, + { url = "https://files.pythonhosted.org/packages/37/a0/2410e5e6032a174c95e0806b1a6585eb21e12f445ebe239fac441995226a/charset_normalizer-3.4.2-cp312-cp312-win32.whl", hash = "sha256:db4c7bf0e07fc3b7d89ac2a5880a6a8062056801b83ff56d8464b70f65482b6c", size = 98357, upload-time = "2025-05-02T08:32:53.079Z" }, + { url = "https://files.pythonhosted.org/packages/6c/4f/c02d5c493967af3eda9c771ad4d2bbc8df6f99ddbeb37ceea6e8716a32bc/charset_normalizer-3.4.2-cp312-cp312-win_amd64.whl", hash = "sha256:5a9979887252a82fefd3d3ed2a8e3b937a7a809f65dcb1e068b090e165bbe99e", size = 105776, upload-time = "2025-05-02T08:32:54.573Z" }, + { url = "https://files.pythonhosted.org/packages/20/94/c5790835a017658cbfabd07f3bfb549140c3ac458cfc196323996b10095a/charset_normalizer-3.4.2-py3-none-any.whl", hash = "sha256:7f56930ab0abd1c45cd15be65cc741c28b1c9a34876ce8c17a2fa107810c0af0", size = 52626, upload-time = "2025-05-02T08:34:40.053Z" }, +] + +[[package]] +name = "click" +version = "8.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/60/6c/8ca2efa64cf75a977a0d7fac081354553ebe483345c734fb6b6515d96bbc/click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202", size = 286342, upload-time = "2025-05-20T23:19:49.832Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/85/32/10bb5764d90a8eee674e9dc6f4db6a0ab47c8c4d0d83c27f7c39ac415a4d/click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b", size = 102215, upload-time = "2025-05-20T23:19:47.796Z" }, +] + +[[package]] +name = "cloudpickle" +version = "3.1.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/52/39/069100b84d7418bc358d81669d5748efb14b9cceacd2f9c75f550424132f/cloudpickle-3.1.1.tar.gz", hash = "sha256:b216fa8ae4019d5482a8ac3c95d8f6346115d8835911fd4aefd1a445e4242c64", size = 22113, upload-time = "2025-01-14T17:02:05.085Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7e/e8/64c37fadfc2816a7701fa8a6ed8d87327c7d54eacfbfb6edab14a2f2be75/cloudpickle-3.1.1-py3-none-any.whl", hash = "sha256:c8c5a44295039331ee9dad40ba100a9c7297b6f988e50e87ccdf3765a668350e", size = 20992, upload-time = "2025-01-14T17:02:02.417Z" }, +] + +[[package]] +name = "colorama" +version = "0.4.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" }, +] + +[[package]] +name = "contourpy" +version = "1.3.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/58/01/1253e6698a07380cd31a736d248a3f2a50a7c88779a1813da27503cadc2a/contourpy-1.3.3.tar.gz", hash = "sha256:083e12155b210502d0bca491432bb04d56dc3432f95a979b429f2848c3dbe880", size = 13466174, upload-time = "2025-07-26T12:03:12.549Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/45/adfee365d9ea3d853550b2e735f9d66366701c65db7855cd07621732ccfc/contourpy-1.3.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b08a32ea2f8e42cf1d4be3169a98dd4be32bafe4f22b6c4cb4ba810fa9e5d2cb", size = 293419, upload-time = "2025-07-26T12:01:21.16Z" }, + { url = "https://files.pythonhosted.org/packages/53/3e/405b59cfa13021a56bba395a6b3aca8cec012b45bf177b0eaf7a202cde2c/contourpy-1.3.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:556dba8fb6f5d8742f2923fe9457dbdd51e1049c4a43fd3986a0b14a1d815fc6", size = 273979, upload-time = "2025-07-26T12:01:22.448Z" }, + { url = "https://files.pythonhosted.org/packages/d4/1c/a12359b9b2ca3a845e8f7f9ac08bdf776114eb931392fcad91743e2ea17b/contourpy-1.3.3-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:92d9abc807cf7d0e047b95ca5d957cf4792fcd04e920ca70d48add15c1a90ea7", size = 332653, upload-time = "2025-07-26T12:01:24.155Z" }, + { url = "https://files.pythonhosted.org/packages/63/12/897aeebfb475b7748ea67b61e045accdfcf0d971f8a588b67108ed7f5512/contourpy-1.3.3-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:b2e8faa0ed68cb29af51edd8e24798bb661eac3bd9f65420c1887b6ca89987c8", size = 379536, upload-time = "2025-07-26T12:01:25.91Z" }, + { url = "https://files.pythonhosted.org/packages/43/8a/a8c584b82deb248930ce069e71576fc09bd7174bbd35183b7943fb1064fd/contourpy-1.3.3-cp312-cp312-manylinux_2_26_s390x.manylinux_2_28_s390x.whl", hash = "sha256:626d60935cf668e70a5ce6ff184fd713e9683fb458898e4249b63be9e28286ea", size = 384397, upload-time = "2025-07-26T12:01:27.152Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/8f/ec6289987824b29529d0dfda0d74a07cec60e54b9c92f3c9da4c0ac732de/contourpy-1.3.3-cp312-cp312-manylinux_2_27_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4d00e655fcef08aba35ec9610536bfe90267d7ab5ba944f7032549c55a146da1", size = 362601, upload-time = "2025-07-26T12:01:28.808Z" }, + { url = "https://files.pythonhosted.org/packages/05/0a/a3fe3be3ee2dceb3e615ebb4df97ae6f3828aa915d3e10549ce016302bd1/contourpy-1.3.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:451e71b5a7d597379ef572de31eeb909a87246974d960049a9848c3bc6c41bf7", size = 1331288, upload-time = "2025-07-26T12:01:31.198Z" }, + { url = "https://files.pythonhosted.org/packages/33/1d/acad9bd4e97f13f3e2b18a3977fe1b4a37ecf3d38d815333980c6c72e963/contourpy-1.3.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:459c1f020cd59fcfe6650180678a9993932d80d44ccde1fa1868977438f0b411", size = 1403386, upload-time = "2025-07-26T12:01:33.947Z" }, + { url = "https://files.pythonhosted.org/packages/cf/8f/5847f44a7fddf859704217a99a23a4f6417b10e5ab1256a179264561540e/contourpy-1.3.3-cp312-cp312-win32.whl", hash = "sha256:023b44101dfe49d7d53932be418477dba359649246075c996866106da069af69", size = 185018, upload-time = "2025-07-26T12:01:35.64Z" }, + { url = "https://files.pythonhosted.org/packages/19/e8/6026ed58a64563186a9ee3f29f41261fd1828f527dd93d33b60feca63352/contourpy-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:8153b8bfc11e1e4d75bcb0bff1db232f9e10b274e0929de9d608027e0d34ff8b", size = 226567, upload-time = "2025-07-26T12:01:36.804Z" }, + { url = "https://files.pythonhosted.org/packages/d1/e2/f05240d2c39a1ed228d8328a78b6f44cd695f7ef47beb3e684cf93604f86/contourpy-1.3.3-cp312-cp312-win_arm64.whl", hash = "sha256:07ce5ed73ecdc4a03ffe3e1b3e3c1166db35ae7584be76f65dbbe28a7791b0cc", size = 193655, upload-time = "2025-07-26T12:01:37.999Z" }, +] + +[[package]] +name = "cryptography" +version = "45.0.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d6/0d/d13399c94234ee8f3df384819dc67e0c5ce215fb751d567a55a1f4b028c7/cryptography-45.0.6.tar.gz", hash = "sha256:5c966c732cf6e4a276ce83b6e4c729edda2df6929083a952cc7da973c539c719", size = 744949, upload-time = "2025-08-05T23:59:27.93Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8c/29/2793d178d0eda1ca4a09a7c4e09a5185e75738cc6d526433e8663b460ea6/cryptography-45.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:048e7ad9e08cf4c0ab07ff7f36cc3115924e22e2266e034450a890d9e312dd74", size = 7042702, upload-time = "2025-08-05T23:58:23.464Z" }, + { url = "https://files.pythonhosted.org/packages/b3/b6/cabd07410f222f32c8d55486c464f432808abaa1f12af9afcbe8f2f19030/cryptography-45.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:44647c5d796f5fc042bbc6d61307d04bf29bccb74d188f18051b635f20a9c75f", size = 4206483, upload-time = "2025-08-05T23:58:27.132Z" }, + { url = "https://files.pythonhosted.org/packages/8b/9e/f9c7d36a38b1cfeb1cc74849aabe9bf817990f7603ff6eb485e0d70e0b27/cryptography-45.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e40b80ecf35ec265c452eea0ba94c9587ca763e739b8e559c128d23bff7ebbbf", size = 4429679, upload-time = "2025-08-05T23:58:29.152Z" }, + { url = "https://files.pythonhosted.org/packages/9c/2a/4434c17eb32ef30b254b9e8b9830cee4e516f08b47fdd291c5b1255b8101/cryptography-45.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", 
hash = "sha256:00e8724bdad672d75e6f069b27970883179bd472cd24a63f6e620ca7e41cc0c5", size = 4210553, upload-time = "2025-08-05T23:58:30.596Z" }, + { url = "https://files.pythonhosted.org/packages/ef/1d/09a5df8e0c4b7970f5d1f3aff1b640df6d4be28a64cae970d56c6cf1c772/cryptography-45.0.6-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a3085d1b319d35296176af31c90338eeb2ddac8104661df79f80e1d9787b8b2", size = 3894499, upload-time = "2025-08-05T23:58:32.03Z" }, + { url = "https://files.pythonhosted.org/packages/79/62/120842ab20d9150a9d3a6bdc07fe2870384e82f5266d41c53b08a3a96b34/cryptography-45.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1b7fa6a1c1188c7ee32e47590d16a5a0646270921f8020efc9a511648e1b2e08", size = 4458484, upload-time = "2025-08-05T23:58:33.526Z" }, + { url = "https://files.pythonhosted.org/packages/fd/80/1bc3634d45ddfed0871bfba52cf8f1ad724761662a0c792b97a951fb1b30/cryptography-45.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:275ba5cc0d9e320cd70f8e7b96d9e59903c815ca579ab96c1e37278d231fc402", size = 4210281, upload-time = "2025-08-05T23:58:35.445Z" }, + { url = "https://files.pythonhosted.org/packages/7d/fe/ffb12c2d83d0ee625f124880a1f023b5878f79da92e64c37962bbbe35f3f/cryptography-45.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:f4028f29a9f38a2025abedb2e409973709c660d44319c61762202206ed577c42", size = 4456890, upload-time = "2025-08-05T23:58:36.923Z" }, + { url = "https://files.pythonhosted.org/packages/8c/8e/b3f3fe0dc82c77a0deb5f493b23311e09193f2268b77196ec0f7a36e3f3e/cryptography-45.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ee411a1b977f40bd075392c80c10b58025ee5c6b47a822a33c1198598a7a5f05", size = 4333247, upload-time = "2025-08-05T23:58:38.781Z" }, + { url = "https://files.pythonhosted.org/packages/b3/a6/c3ef2ab9e334da27a1d7b56af4a2417d77e7806b2e0f90d6267ce120d2e4/cryptography-45.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:e2a21a8eda2d86bb604934b6b37691585bd095c1f788530c1fcefc53a82b3453", size = 4565045, upload-time = "2025-08-05T23:58:40.415Z" }, + { url = "https://files.pythonhosted.org/packages/31/c3/77722446b13fa71dddd820a5faab4ce6db49e7e0bf8312ef4192a3f78e2f/cryptography-45.0.6-cp311-abi3-win32.whl", hash = "sha256:d063341378d7ee9c91f9d23b431a3502fc8bfacd54ef0a27baa72a0843b29159", size = 2928923, upload-time = "2025-08-05T23:58:41.919Z" }, + { url = "https://files.pythonhosted.org/packages/38/63/a025c3225188a811b82932a4dcc8457a26c3729d81578ccecbcce2cb784e/cryptography-45.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:833dc32dfc1e39b7376a87b9a6a4288a10aae234631268486558920029b086ec", size = 3403805, upload-time = "2025-08-05T23:58:43.792Z" }, + { url = "https://files.pythonhosted.org/packages/5b/af/bcfbea93a30809f126d51c074ee0fac5bd9d57d068edf56c2a73abedbea4/cryptography-45.0.6-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:3436128a60a5e5490603ab2adbabc8763613f638513ffa7d311c900a8349a2a0", size = 7020111, upload-time = "2025-08-05T23:58:45.316Z" }, + { url = "https://files.pythonhosted.org/packages/98/c6/ea5173689e014f1a8470899cd5beeb358e22bb3cf5a876060f9d1ca78af4/cryptography-45.0.6-cp37-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:0d9ef57b6768d9fa58e92f4947cea96ade1233c0e236db22ba44748ffedca394", size = 4198169, upload-time = "2025-08-05T23:58:47.121Z" }, + { url = "https://files.pythonhosted.org/packages/ba/73/b12995edc0c7e2311ffb57ebd3b351f6b268fed37d93bfc6f9856e01c473/cryptography-45.0.6-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = 
"sha256:ea3c42f2016a5bbf71825537c2ad753f2870191134933196bee408aac397b3d9", size = 4421273, upload-time = "2025-08-05T23:58:48.557Z" }, + { url = "https://files.pythonhosted.org/packages/f7/6e/286894f6f71926bc0da67408c853dd9ba953f662dcb70993a59fd499f111/cryptography-45.0.6-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:20ae4906a13716139d6d762ceb3e0e7e110f7955f3bc3876e3a07f5daadec5f3", size = 4199211, upload-time = "2025-08-05T23:58:50.139Z" }, + { url = "https://files.pythonhosted.org/packages/de/34/a7f55e39b9623c5cb571d77a6a90387fe557908ffc44f6872f26ca8ae270/cryptography-45.0.6-cp37-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:2dac5ec199038b8e131365e2324c03d20e97fe214af051d20c49db129844e8b3", size = 3883732, upload-time = "2025-08-05T23:58:52.253Z" }, + { url = "https://files.pythonhosted.org/packages/f9/b9/c6d32edbcba0cd9f5df90f29ed46a65c4631c4fbe11187feb9169c6ff506/cryptography-45.0.6-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:18f878a34b90d688982e43f4b700408b478102dd58b3e39de21b5ebf6509c301", size = 4450655, upload-time = "2025-08-05T23:58:53.848Z" }, + { url = "https://files.pythonhosted.org/packages/77/2d/09b097adfdee0227cfd4c699b3375a842080f065bab9014248933497c3f9/cryptography-45.0.6-cp37-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:5bd6020c80c5b2b2242d6c48487d7b85700f5e0038e67b29d706f98440d66eb5", size = 4198956, upload-time = "2025-08-05T23:58:55.209Z" }, + { url = "https://files.pythonhosted.org/packages/55/66/061ec6689207d54effdff535bbdf85cc380d32dd5377173085812565cf38/cryptography-45.0.6-cp37-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:eccddbd986e43014263eda489abbddfbc287af5cddfd690477993dbb31e31016", size = 4449859, upload-time = "2025-08-05T23:58:56.639Z" }, + { url = "https://files.pythonhosted.org/packages/41/ff/e7d5a2ad2d035e5a2af116e1a3adb4d8fcd0be92a18032917a089c6e5028/cryptography-45.0.6-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:550ae02148206beb722cfe4ef0933f9352bab26b087af00e48fdfb9ade35c5b3", size = 4320254, upload-time = "2025-08-05T23:58:58.833Z" }, + { url = "https://files.pythonhosted.org/packages/82/27/092d311af22095d288f4db89fcaebadfb2f28944f3d790a4cf51fe5ddaeb/cryptography-45.0.6-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5b64e668fc3528e77efa51ca70fadcd6610e8ab231e3e06ae2bab3b31c2b8ed9", size = 4554815, upload-time = "2025-08-05T23:59:00.283Z" }, + { url = "https://files.pythonhosted.org/packages/7e/01/aa2f4940262d588a8fdf4edabe4cda45854d00ebc6eaac12568b3a491a16/cryptography-45.0.6-cp37-abi3-win32.whl", hash = "sha256:780c40fb751c7d2b0c6786ceee6b6f871e86e8718a8ff4bc35073ac353c7cd02", size = 2912147, upload-time = "2025-08-05T23:59:01.716Z" }, + { url = "https://files.pythonhosted.org/packages/0a/bc/16e0276078c2de3ceef6b5a34b965f4436215efac45313df90d55f0ba2d2/cryptography-45.0.6-cp37-abi3-win_amd64.whl", hash = "sha256:20d15aed3ee522faac1a39fbfdfee25d17b1284bafd808e1640a74846d7c4d1b", size = 3390459, upload-time = "2025-08-05T23:59:03.358Z" }, +] + +[[package]] +name = "cycler" +version = "0.12.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a9/95/a3dbbb5028f35eafb79008e7522a75244477d2838f38cbb722248dabc2a8/cycler-0.12.1.tar.gz", hash = "sha256:88bb128f02ba341da8ef447245a9e138fae777f6a23943da4540077d3601eb1c", size = 7615, upload-time = "2023-10-07T05:32:18.335Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/05/c19819d5e3d95294a6f5947fb9b9629efb316b96de511b418c53d245aae6/cycler-0.12.1-py3-none-any.whl", hash = 
"sha256:85cef7cff222d8644161529808465972e51340599459b8ac3ccbac5a854e0d30", size = 8321, upload-time = "2023-10-07T05:32:16.783Z" }, +] + +[[package]] +name = "dask" +version = "2025.11.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "cloudpickle" }, + { name = "fsspec" }, + { name = "packaging" }, + { name = "partd" }, + { name = "pyyaml" }, + { name = "toolz" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/db/33/eacaa72731f7fc64868caaf2d35060d50049eff889bd217263e68f76472f/dask-2025.11.0.tar.gz", hash = "sha256:23d59e624b80ee05b7cc8df858682cca58262c4c3b197ccf61da0f6543c8f7c3", size = 10984781, upload-time = "2025-11-06T16:56:51.535Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/54/a46920229d12c3a6e9f0081d1bdaeffad23c1826353ace95714faee926e5/dask-2025.11.0-py3-none-any.whl", hash = "sha256:08c35a8146c05c93b34f83cf651009129c42ee71762da7ca452fb7308641c2b8", size = 1477108, upload-time = "2025-11-06T16:56:44.892Z" }, +] + +[[package]] +name = "databricks-sdk" +version = "0.64.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "google-auth" }, + { name = "requests" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cf/31/18a655a4382851c897a84c94e547e3a8e1a0f2b51e4ee74227c982a53943/databricks_sdk-0.64.0.tar.gz", hash = "sha256:e21cce45bb4f1254ad5d22ea77fc30484378beb54b5b42db098d1f975c813e81", size = 746326, upload-time = "2025-08-20T11:47:22.469Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/21/70/734d3b559e72c4231531c77685f204d8c14202ada640c4f16229a6456b57/databricks_sdk-0.64.0-py3-none-any.whl", hash = "sha256:3efb2a739deda3186d0380ad6ced7d4811ced7adcaf61cbf0f897eab52974a17", size = 703407, upload-time = "2025-08-20T11:47:20.509Z" }, +] + +[[package]] +name = "deprecated" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "wrapt" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/49/85/12f0a49a7c4ffb70572b6c2ef13c90c88fd190debda93b23f026b25f9634/deprecated-1.3.1.tar.gz", hash = "sha256:b1b50e0ff0c1fddaa5708a2c6b0a6588bb09b892825ab2b214ac9ea9d92a5223", size = 2932523, upload-time = "2025-10-30T08:19:02.757Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/84/d0/205d54408c08b13550c733c4b85429e7ead111c7f0014309637425520a9a/deprecated-1.3.1-py2.py3-none-any.whl", hash = "sha256:597bfef186b6f60181535a29fbe44865ce137a5079f295b479886c82729d5f3f", size = 11298, upload-time = "2025-10-30T08:19:00.758Z" }, +] + +[[package]] +name = "docker" +version = "7.1.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pywin32", marker = "sys_platform == 'win32'" }, + { name = "requests" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/91/9b/4a2ea29aeba62471211598dac5d96825bb49348fa07e906ea930394a83ce/docker-7.1.0.tar.gz", hash = "sha256:ad8c70e6e3f8926cb8a92619b832b4ea5299e2831c14284663184e200546fa6c", size = 117834, upload-time = "2024-05-23T11:13:57.216Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/26/57c6fb270950d476074c087527a558ccb6f4436657314bfb6cdf484114c4/docker-7.1.0-py3-none-any.whl", hash = "sha256:c96b93b7f0a746f9e77d325bcfb87422a3d8bd4f03136ae8a85b37f1898d5fc0", size = 147774, upload-time = "2024-05-23T11:13:55.01Z" }, +] + +[[package]] +name = "extra-streamlit-components" +version = "0.1.81" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = 
"streamlit" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/49/9b47a3450034d74259f9d4887d85be4e6a771bc21da467b253323d78c4d9/extra_streamlit_components-0.1.81.tar.gz", hash = "sha256:eb9beb7bacfe8b3d238f1888a21c78ac6cfa569341be484bca08c3ea0b15f20d", size = 2250141, upload-time = "2025-08-17T18:12:33.606Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fd/8d/d2f1eeb52c50c990d14fd91bea35157890bb791c46b3f2bebaa5eef4bdf6/extra_streamlit_components-0.1.81-py3-none-any.whl", hash = "sha256:11a4651dbd03cac04edfbb8711757b1d10e3cdf280b8fa3a43f970d05e684619", size = 2278499, upload-time = "2025-08-17T18:12:30.803Z" }, +] + +[[package]] +name = "fastapi" +version = "0.116.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pydantic" }, + { name = "starlette" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/78/d7/6c8b3bfe33eeffa208183ec037fee0cce9f7f024089ab1c5d12ef04bd27c/fastapi-0.116.1.tar.gz", hash = "sha256:ed52cbf946abfd70c5a0dccb24673f0670deeb517a88b3544d03c2a6bf283143", size = 296485, upload-time = "2025-07-11T16:22:32.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/47/d63c60f59a59467fda0f93f46335c9d18526d7071f025cb5b89d5353ea42/fastapi-0.116.1-py3-none-any.whl", hash = "sha256:c46ac7c312df840f0c9e220f7964bada936781bc4e2e6eb71f1c4d7553786565", size = 95631, upload-time = "2025-07-11T16:22:30.485Z" }, +] + +[[package]] +name = "fasteners" +version = "0.20" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2d/18/7881a99ba5244bfc82f06017316ffe93217dbbbcfa52b887caa1d4f2a6d3/fasteners-0.20.tar.gz", hash = "sha256:55dce8792a41b56f727ba6e123fcaee77fd87e638a6863cec00007bfea84c8d8", size = 25087, upload-time = "2025-08-11T10:19:37.785Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/51/ac/e5d886f892666d2d1e5cb8c1a41146e1d79ae8896477b1153a21711d3b44/fasteners-0.20-py3-none-any.whl", hash = "sha256:9422c40d1e350e4259f509fb2e608d6bc43c0136f79a00db1b49046029d0b3b7", size = 18702, upload-time = "2025-08-11T10:19:35.716Z" }, +] + +[[package]] +name = "flask" +version = "3.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "blinker" }, + { name = "click" }, + { name = "itsdangerous" }, + { name = "jinja2" }, + { name = "markupsafe" }, + { name = "werkzeug" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/dc/6d/cfe3c0fcc5e477df242b98bfe186a4c34357b4847e87ecaef04507332dab/flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87", size = 720160, upload-time = "2025-08-19T21:03:21.205Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/f9/7f9263c5695f4bd0023734af91bedb2ff8209e8de6ead162f35d8dc762fd/flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c", size = 103308, upload-time = "2025-08-19T21:03:19.499Z" }, +] + +[[package]] +name = "fonttools" +version = "4.60.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/4b/42/97a13e47a1e51a5a7142475bbcf5107fe3a68fc34aef331c897d5fb98ad0/fonttools-4.60.1.tar.gz", hash = "sha256:ef00af0439ebfee806b25f24c8f92109157ff3fac5731dc7867957812e87b8d9", size = 3559823, upload-time = "2025-09-29T21:13:27.129Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/e3/f7/a10b101b7a6f8836a5adb47f2791f2075d044a6ca123f35985c42edc82d8/fonttools-4.60.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:7b0c6d57ab00dae9529f3faf187f2254ea0aa1e04215cf2f1a8ec277c96661bc", size = 2832953, upload-time = "2025-09-29T21:11:39.616Z" }, + { url = "https://files.pythonhosted.org/packages/ed/fe/7bd094b59c926acf2304d2151354ddbeb74b94812f3dc943c231db09cb41/fonttools-4.60.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:839565cbf14645952d933853e8ade66a463684ed6ed6c9345d0faf1f0e868877", size = 2352706, upload-time = "2025-09-29T21:11:41.826Z" }, + { url = "https://files.pythonhosted.org/packages/c0/ca/4bb48a26ed95a1e7eba175535fe5805887682140ee0a0d10a88e1de84208/fonttools-4.60.1-cp312-cp312-manylinux1_x86_64.manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:8177ec9676ea6e1793c8a084a90b65a9f778771998eb919d05db6d4b1c0b114c", size = 4923716, upload-time = "2025-09-29T21:11:43.893Z" }, + { url = "https://files.pythonhosted.org/packages/b8/9f/2cb82999f686c1d1ddf06f6ae1a9117a880adbec113611cc9d22b2fdd465/fonttools-4.60.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:996a4d1834524adbb423385d5a629b868ef9d774670856c63c9a0408a3063401", size = 4968175, upload-time = "2025-09-29T21:11:46.439Z" }, + { url = "https://files.pythonhosted.org/packages/18/79/be569699e37d166b78e6218f2cde8c550204f2505038cdd83b42edc469b9/fonttools-4.60.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:a46b2f450bc79e06ef3b6394f0c68660529ed51692606ad7f953fc2e448bc903", size = 4911031, upload-time = "2025-09-29T21:11:48.977Z" }, + { url = "https://files.pythonhosted.org/packages/cc/9f/89411cc116effaec5260ad519162f64f9c150e5522a27cbb05eb62d0c05b/fonttools-4.60.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6ec722ee589e89a89f5b7574f5c45604030aa6ae24cb2c751e2707193b466fed", size = 5062966, upload-time = "2025-09-29T21:11:54.344Z" }, + { url = "https://files.pythonhosted.org/packages/62/a1/f888221934b5731d46cb9991c7a71f30cb1f97c0ef5fcf37f8da8fce6c8e/fonttools-4.60.1-cp312-cp312-win32.whl", hash = "sha256:b2cf105cee600d2de04ca3cfa1f74f1127f8455b71dbad02b9da6ec266e116d6", size = 2218750, upload-time = "2025-09-29T21:11:56.601Z" }, + { url = "https://files.pythonhosted.org/packages/88/8f/a55b5550cd33cd1028601df41acd057d4be20efa5c958f417b0c0613924d/fonttools-4.60.1-cp312-cp312-win_amd64.whl", hash = "sha256:992775c9fbe2cf794786fa0ffca7f09f564ba3499b8fe9f2f80bd7197db60383", size = 2267026, upload-time = "2025-09-29T21:11:58.852Z" }, + { url = "https://files.pythonhosted.org/packages/c7/93/0dd45cd283c32dea1545151d8c3637b4b8c53cdb3a625aeb2885b184d74d/fonttools-4.60.1-py3-none-any.whl", hash = "sha256:906306ac7afe2156fcf0042173d6ebbb05416af70f6b370967b47f8f00103bbb", size = 1143175, upload-time = "2025-09-29T21:13:24.134Z" }, +] + +[[package]] +name = "fsspec" +version = "2025.10.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/24/7f/2747c0d332b9acfa75dc84447a066fdf812b5a6b8d30472b74d309bfe8cb/fsspec-2025.10.0.tar.gz", hash = "sha256:b6789427626f068f9a83ca4e8a3cc050850b6c0f71f99ddb4f542b8266a26a59", size = 309285, upload-time = "2025-10-30T14:58:44.036Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/eb/02/a6b21098b1d5d6249b7c5ab69dde30108a71e4e819d4a9778f1de1d5b70d/fsspec-2025.10.0-py3-none-any.whl", hash = "sha256:7c7712353ae7d875407f97715f0e1ffcc21e33d5b24556cb1e090ae9409ec61d", size = 
200966, upload-time = "2025-10-30T14:58:42.53Z" }, +] + +[[package]] +name = "gitdb" +version = "4.0.12" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "smmap" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/94/63b0fc47eb32792c7ba1fe1b694daec9a63620db1e313033d18140c2320a/gitdb-4.0.12.tar.gz", hash = "sha256:5ef71f855d191a3326fcfbc0d5da835f26b13fbcba60c32c21091c349ffdb571", size = 394684, upload-time = "2025-01-02T07:20:46.413Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/61/5c78b91c3143ed5c14207f463aecfc8f9dbb5092fb2869baf37c273b2705/gitdb-4.0.12-py3-none-any.whl", hash = "sha256:67073e15955400952c6565cc3e707c554a4eea2e428946f7a4c162fab9bd9bcf", size = 62794, upload-time = "2025-01-02T07:20:43.624Z" }, +] + +[[package]] +name = "gitpython" +version = "3.1.44" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "gitdb" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/89/37df0b71473153574a5cdef8f242de422a0f5d26d7a9e231e6f169b4ad14/gitpython-3.1.44.tar.gz", hash = "sha256:c87e30b26253bf5418b01b0660f818967f3c503193838337fe5e573331249269", size = 214196, upload-time = "2025-01-02T07:32:43.59Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1d/9a/4114a9057db2f1462d5c8f8390ab7383925fe1ac012eaa42402ad65c2963/GitPython-3.1.44-py3-none-any.whl", hash = "sha256:9e0e10cda9bed1ee64bc9a6de50e7e38a9c9943241cd7f585f6df3ed28011110", size = 207599, upload-time = "2025-01-02T07:32:40.731Z" }, +] + +[[package]] +name = "google-auth" +version = "2.40.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "pyasn1-modules" }, + { name = "rsa" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/9b/e92ef23b84fa10a64ce4831390b7a4c2e53c0132568d99d4ae61d04c8855/google_auth-2.40.3.tar.gz", hash = "sha256:500c3a29adedeb36ea9cf24b8d10858e152f2412e3ca37829b3fa18e33d63b77", size = 281029, upload-time = "2025-06-04T18:04:57.577Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/63/b19553b658a1692443c62bd07e5868adaa0ad746a0751ba62c59568cd45b/google_auth-2.40.3-py2.py3-none-any.whl", hash = "sha256:1370d4593e86213563547f97a92752fc658456fe4514c809544f330fed45a7ca", size = 216137, upload-time = "2025-06-04T18:04:55.573Z" }, +] + +[[package]] +name = "graphene" +version = "3.4.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "graphql-core" }, + { name = "graphql-relay" }, + { name = "python-dateutil" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/cc/f6/bf62ff950c317ed03e77f3f6ddd7e34aaa98fe89d79ebd660c55343d8054/graphene-3.4.3.tar.gz", hash = "sha256:2a3786948ce75fe7e078443d37f609cbe5bb36ad8d6b828740ad3b95ed1a0aaa", size = 44739, upload-time = "2024-11-09T20:44:25.757Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/e0/61d8e98007182e6b2aca7cf65904721fb2e4bce0192272ab9cb6f69d8812/graphene-3.4.3-py2.py3-none-any.whl", hash = "sha256:820db6289754c181007a150db1f7fff544b94142b556d12e3ebc777a7bf36c71", size = 114894, upload-time = "2024-11-09T20:44:23.851Z" }, +] + +[[package]] +name = "graphql-core" +version = "3.2.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/c4/16/7574029da84834349b60ed71614d66ca3afe46e9bf9c7b9562102acb7d4f/graphql_core-3.2.6.tar.gz", hash = "sha256:c08eec22f9e40f0bd61d805907e3b3b1b9a320bc606e23dc145eebca07c8fbab", size 
= 505353, upload-time = "2025-01-26T16:36:27.374Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ae/4f/7297663840621022bc73c22d7d9d80dbc78b4db6297f764b545cd5dd462d/graphql_core-3.2.6-py3-none-any.whl", hash = "sha256:78b016718c161a6fb20a7d97bbf107f331cd1afe53e45566c59f776ed7f0b45f", size = 203416, upload-time = "2025-01-26T16:36:24.868Z" }, +] + +[[package]] +name = "graphql-relay" +version = "3.2.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "graphql-core" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d1/13/98fbf8d67552f102488ffc16c6f559ce71ea15f6294728d33928ab5ff14d/graphql-relay-3.2.0.tar.gz", hash = "sha256:1ff1c51298356e481a0be009ccdff249832ce53f30559c1338f22a0e0d17250c", size = 50027, upload-time = "2022-04-16T11:03:45.447Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/74/16/a4cf06adbc711bd364a73ce043b0b08d8fa5aae3df11b6ee4248bcdad2e0/graphql_relay-3.2.0-py3-none-any.whl", hash = "sha256:c9b22bd28b170ba1fe674c74384a8ff30a76c8e26f88ac3aa1584dd3179953e5", size = 16940, upload-time = "2022-04-16T11:03:43.895Z" }, +] + +[[package]] +name = "greenlet" +version = "3.2.4" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/03/b8/704d753a5a45507a7aab61f18db9509302ed3d0a27ac7e0359ec2905b1a6/greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d", size = 188260, upload-time = "2025-08-07T13:24:33.51Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/69/9b804adb5fd0671f367781560eb5eb586c4d495277c93bde4307b9e28068/greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd", size = 274079, upload-time = "2025-08-07T13:15:45.033Z" }, + { url = "https://files.pythonhosted.org/packages/46/e9/d2a80c99f19a153eff70bc451ab78615583b8dac0754cfb942223d2c1a0d/greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb", size = 640997, upload-time = "2025-08-07T13:42:56.234Z" }, + { url = "https://files.pythonhosted.org/packages/3b/16/035dcfcc48715ccd345f3a93183267167cdd162ad123cd93067d86f27ce4/greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968", size = 655185, upload-time = "2025-08-07T13:45:27.624Z" }, + { url = "https://files.pythonhosted.org/packages/31/da/0386695eef69ffae1ad726881571dfe28b41970173947e7c558d9998de0f/greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9", size = 649926, upload-time = "2025-08-07T13:53:15.251Z" }, + { url = "https://files.pythonhosted.org/packages/68/88/69bf19fd4dc19981928ceacbc5fd4bb6bc2215d53199e367832e98d1d8fe/greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6", size = 651839, upload-time = "2025-08-07T13:18:30.281Z" }, + { url = "https://files.pythonhosted.org/packages/19/0d/6660d55f7373b2ff8152401a83e02084956da23ae58cddbfb0b330978fe9/greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0", size = 607586, upload-time = "2025-08-07T13:18:28.544Z" }, + { url = 
"https://files.pythonhosted.org/packages/8e/1a/c953fdedd22d81ee4629afbb38d2f9d71e37d23caace44775a3a969147d4/greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0", size = 1123281, upload-time = "2025-08-07T13:42:39.858Z" }, + { url = "https://files.pythonhosted.org/packages/3f/c7/12381b18e21aef2c6bd3a636da1088b888b97b7a0362fac2e4de92405f97/greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f", size = 1151142, upload-time = "2025-08-07T13:18:22.981Z" }, + { url = "https://files.pythonhosted.org/packages/27/45/80935968b53cfd3f33cf99ea5f08227f2646e044568c9b1555b58ffd61c2/greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0", size = 1564846, upload-time = "2025-11-04T12:42:15.191Z" }, + { url = "https://files.pythonhosted.org/packages/69/02/b7c30e5e04752cb4db6202a3858b149c0710e5453b71a3b2aec5d78a1aab/greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d", size = 1633814, upload-time = "2025-11-04T12:42:17.175Z" }, + { url = "https://files.pythonhosted.org/packages/e9/08/b0814846b79399e585f974bbeebf5580fbe59e258ea7be64d9dfb253c84f/greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02", size = 299899, upload-time = "2025-08-07T13:38:53.448Z" }, +] + +[[package]] +name = "gunicorn" +version = "23.0.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/34/72/9614c465dc206155d93eff0ca20d42e1e35afc533971379482de953521a4/gunicorn-23.0.0.tar.gz", hash = "sha256:f014447a0101dc57e294f6c18ca6b40227a4c90e9bdb586042628030cba004ec", size = 375031, upload-time = "2024-08-10T20:25:27.378Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/7d/6dac2a6e1eba33ee43f318edbed4ff29151a49b5d37f080aad1e6469bca4/gunicorn-23.0.0-py3-none-any.whl", hash = "sha256:ec400d38950de4dfd418cff8328b2c8faed0edb0d517d3394e457c317908ca4d", size = 85029, upload-time = "2024-08-10T20:25:24.996Z" }, +] + +[[package]] +name = "h11" +version = "0.16.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" }, +] + +[[package]] +name = "idna" +version = "3.10" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490, upload-time = "2024-09-15T18:07:39.745Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = 
"sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442, upload-time = "2024-09-15T18:07:37.964Z" }, +] + +[[package]] +name = "importlib-metadata" +version = "8.7.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "zipp" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/76/66/650a33bd90f786193e4de4b3ad86ea60b53c89b669a5c7be931fac31cdb0/importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000", size = 56641, upload-time = "2025-04-27T15:29:01.736Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/b0/36bd937216ec521246249be3bf9855081de4c5e06a0c9b4219dbeda50373/importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd", size = 27656, upload-time = "2025-04-27T15:29:00.214Z" }, +] + +[[package]] +name = "iniconfig" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" }, +] + +[[package]] +name = "itsdangerous" +version = "2.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/9c/cb/8ac0172223afbccb63986cc25049b154ecfb5e85932587206f42317be31d/itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173", size = 54410, upload-time = "2024-04-16T21:28:15.614Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/96/92447566d16df59b2a776c0fb82dbc4d9e07cd95062562af01e408583fc4/itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef", size = 16234, upload-time = "2024-04-16T21:28:14.499Z" }, +] + +[[package]] +name = "jinja2" +version = "3.1.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/df/bf/f7da0350254c0ed7c72f3e33cef02e048281fec7ecec5f032d4aac52226b/jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d", size = 245115, upload-time = "2025-03-05T20:05:02.478Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67", size = 134899, upload-time = "2025-03-05T20:05:00.369Z" }, +] + +[[package]] +name = "jmespath" +version = "1.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/00/2a/e867e8531cf3e36b41201936b7fa7ba7b5702dbef42922193f05c8976cd6/jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe", size = 25843, upload-time = "2022-06-17T18:00:12.224Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/31/b4/b9b800c45527aadd64d5b442f9b932b00648617eb5d63d2c7a6587b7cafc/jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980", size = 20256, upload-time = "2022-06-17T18:00:10.251Z" }, +] + +[[package]] +name = "joblib" +version = "1.5.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e8/5d/447af5ea094b9e4c4054f82e223ada074c552335b9b4b2d14bd9b35a67c4/joblib-1.5.2.tar.gz", hash = "sha256:3faa5c39054b2f03ca547da9b2f52fde67c06240c31853f306aea97f13647b55", size = 331077, upload-time = "2025-08-27T12:15:46.575Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/e8/685f47e0d754320684db4425a0967f7d3fa70126bffd76110b7009a0090f/joblib-1.5.2-py3-none-any.whl", hash = "sha256:4e1f0bdbb987e6d843c70cf43714cb276623def372df3c22fe5266b2670bc241", size = 308396, upload-time = "2025-08-27T12:15:45.188Z" }, +] + +[[package]] +name = "jsonschema" +version = "4.24.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "jsonschema-specifications" }, + { name = "referencing" }, + { name = "rpds-py" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/d3/1cf5326b923a53515d8f3a2cd442e6d7e94fcc444716e879ea70a0ce3177/jsonschema-4.24.0.tar.gz", hash = "sha256:0b4e8069eb12aedfa881333004bccaec24ecef5a8a6a4b6df142b2cc9599d196", size = 353480, upload-time = "2025-05-26T18:48:10.459Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a2/3d/023389198f69c722d039351050738d6755376c8fd343e91dc493ea485905/jsonschema-4.24.0-py3-none-any.whl", hash = "sha256:a462455f19f5faf404a7902952b6f0e3ce868f3ee09a359b05eca6673bd8412d", size = 88709, upload-time = "2025-05-26T18:48:08.417Z" }, +] + +[[package]] +name = "jsonschema-specifications" +version = "2025.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "referencing" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/bf/ce/46fbd9c8119cfc3581ee5643ea49464d168028cfb5caff5fc0596d0cf914/jsonschema_specifications-2025.4.1.tar.gz", hash = "sha256:630159c9f4dbea161a6a2205c3011cc4f18ff381b189fff48bb39b9bf26ae608", size = 15513, upload-time = "2025-04-23T12:34:07.418Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/01/0e/b27cdbaccf30b890c40ed1da9fd4a3593a5cf94dae54fb34f8a4b74fcd3f/jsonschema_specifications-2025.4.1-py3-none-any.whl", hash = "sha256:4653bffbd6584f7de83a67e0d620ef16900b390ddc7939d56684d6c81e33f1af", size = 18437, upload-time = "2025-04-23T12:34:05.422Z" }, +] + +[[package]] +name = "kiwisolver" +version = "1.4.9" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/3c/85844f1b0feb11ee581ac23fe5fce65cd049a200c1446708cc1b7f922875/kiwisolver-1.4.9.tar.gz", hash = "sha256:c3b22c26c6fd6811b0ae8363b95ca8ce4ea3c202d3d0975b2914310ceb1bcc4d", size = 97564, upload-time = "2025-08-10T21:27:49.279Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/c9/13573a747838aeb1c76e3267620daa054f4152444d1f3d1a2324b78255b5/kiwisolver-1.4.9-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ac5a486ac389dddcc5bef4f365b6ae3ffff2c433324fb38dd35e3fab7c957999", size = 123686, upload-time = "2025-08-10T21:26:10.034Z" }, + { url = "https://files.pythonhosted.org/packages/51/ea/2ecf727927f103ffd1739271ca19c424d0e65ea473fbaeea1c014aea93f6/kiwisolver-1.4.9-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:f2ba92255faa7309d06fe44c3a4a97efe1c8d640c2a79a5ef728b685762a6fd2", size = 66460, upload-time = "2025-08-10T21:26:11.083Z" }, + { url = "https://files.pythonhosted.org/packages/5b/5a/51f5464373ce2aeb5194508298a508b6f21d3867f499556263c64c621914/kiwisolver-1.4.9-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a2899935e724dd1074cb568ce7ac0dce28b2cd6ab539c8e001a8578eb106d14", size = 64952, upload-time = "2025-08-10T21:26:12.058Z" }, + { url = "https://files.pythonhosted.org/packages/70/90/6d240beb0f24b74371762873e9b7f499f1e02166a2d9c5801f4dbf8fa12e/kiwisolver-1.4.9-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f6008a4919fdbc0b0097089f67a1eb55d950ed7e90ce2cc3e640abadd2757a04", size = 1474756, upload-time = "2025-08-10T21:26:13.096Z" }, + { url = "https://files.pythonhosted.org/packages/12/42/f36816eaf465220f683fb711efdd1bbf7a7005a2473d0e4ed421389bd26c/kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:67bb8b474b4181770f926f7b7d2f8c0248cbcb78b660fdd41a47054b28d2a752", size = 1276404, upload-time = "2025-08-10T21:26:14.457Z" }, + { url = "https://files.pythonhosted.org/packages/2e/64/bc2de94800adc830c476dce44e9b40fd0809cddeef1fde9fcf0f73da301f/kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2327a4a30d3ee07d2fbe2e7933e8a37c591663b96ce42a00bc67461a87d7df77", size = 1294410, upload-time = "2025-08-10T21:26:15.73Z" }, + { url = "https://files.pythonhosted.org/packages/5f/42/2dc82330a70aa8e55b6d395b11018045e58d0bb00834502bf11509f79091/kiwisolver-1.4.9-cp312-cp312-manylinux_2_24_s390x.manylinux_2_28_s390x.whl", hash = "sha256:7a08b491ec91b1d5053ac177afe5290adacf1f0f6307d771ccac5de30592d198", size = 1343631, upload-time = "2025-08-10T21:26:17.045Z" }, + { url = "https://files.pythonhosted.org/packages/22/fd/f4c67a6ed1aab149ec5a8a401c323cee7a1cbe364381bb6c9c0d564e0e20/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8fc5c867c22b828001b6a38d2eaeb88160bf5783c6cb4a5e440efc981ce286d", size = 2224963, upload-time = "2025-08-10T21:26:18.737Z" }, + { url = "https://files.pythonhosted.org/packages/45/aa/76720bd4cb3713314677d9ec94dcc21ced3f1baf4830adde5bb9b2430a5f/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:3b3115b2581ea35bb6d1f24a4c90af37e5d9b49dcff267eeed14c3893c5b86ab", size = 2321295, upload-time = "2025-08-10T21:26:20.11Z" }, + { url = "https://files.pythonhosted.org/packages/80/19/d3ec0d9ab711242f56ae0dc2fc5d70e298bb4a1f9dfab44c027668c673a1/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:858e4c22fb075920b96a291928cb7dea5644e94c0ee4fcd5af7e865655e4ccf2", size = 2487987, upload-time = "2025-08-10T21:26:21.49Z" }, + { url = "https://files.pythonhosted.org/packages/39/e9/61e4813b2c97e86b6fdbd4dd824bf72d28bcd8d4849b8084a357bc0dd64d/kiwisolver-1.4.9-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ed0fecd28cc62c54b262e3736f8bb2512d8dcfdc2bcf08be5f47f96bf405b145", size = 2291817, upload-time = "2025-08-10T21:26:22.812Z" }, + { url = "https://files.pythonhosted.org/packages/a0/41/85d82b0291db7504da3c2defe35c9a8a5c9803a730f297bd823d11d5fb77/kiwisolver-1.4.9-cp312-cp312-win_amd64.whl", hash = "sha256:f68208a520c3d86ea51acf688a3e3002615a7f0238002cccc17affecc86a8a54", size = 73895, upload-time = "2025-08-10T21:26:24.37Z" }, + { url = "https://files.pythonhosted.org/packages/e2/92/5f3068cf15ee5cb624a0c7596e67e2a0bb2adee33f71c379054a491d07da/kiwisolver-1.4.9-cp312-cp312-win_arm64.whl", hash = 
"sha256:2c1a4f57df73965f3f14df20b80ee29e6a7930a57d2d9e8491a25f676e197c60", size = 64992, upload-time = "2025-08-10T21:26:25.732Z" }, +] + +[[package]] +name = "locket" +version = "1.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/2f/83/97b29fe05cb6ae28d2dbd30b81e2e402a3eed5f460c26e9eaa5895ceacf5/locket-1.0.0.tar.gz", hash = "sha256:5c0d4c052a8bbbf750e056a8e65ccd309086f4f0f18a2eac306a8dfa4112a632", size = 4350, upload-time = "2022-04-20T22:04:44.312Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/db/bc/83e112abc66cd466c6b83f99118035867cecd41802f8d044638aa78a106e/locket-1.0.0-py2.py3-none-any.whl", hash = "sha256:b6c819a722f7b6bd955b80781788e4a66a55628b858d347536b7e81325a3a5e3", size = 4398, upload-time = "2022-04-20T22:04:42.23Z" }, +] + +[[package]] +name = "mako" +version = "1.3.10" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9e/38/bd5b78a920a64d708fe6bc8e0a2c075e1389d53bef8413725c63ba041535/mako-1.3.10.tar.gz", hash = "sha256:99579a6f39583fa7e5630a28c3c1f440e4e97a414b80372649c0ce338da2ea28", size = 392474, upload-time = "2025-04-10T12:44:31.16Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/87/fb/99f81ac72ae23375f22b7afdb7642aba97c00a713c217124420147681a2f/mako-1.3.10-py3-none-any.whl", hash = "sha256:baef24a52fc4fc514a0887ac600f9f1cff3d82c61d4d700a1fa84d597b88db59", size = 78509, upload-time = "2025-04-10T12:50:53.297Z" }, +] + +[[package]] +name = "markupsafe" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b2/97/5d42485e71dfc078108a86d6de8fa46db44a1a9295e89c5d6d4a06e23a62/markupsafe-3.0.2.tar.gz", hash = "sha256:ee55d3edf80167e48ea11a923c7386f4669df67d7994554387f84e7d8b0a2bf0", size = 20537, upload-time = "2024-10-18T15:21:54.129Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/22/09/d1f21434c97fc42f09d290cbb6350d44eb12f09cc62c9476effdb33a18aa/MarkupSafe-3.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:9778bd8ab0a994ebf6f84c2b949e65736d5575320a17ae8984a77fab08db94cf", size = 14274, upload-time = "2024-10-18T15:21:13.777Z" }, + { url = "https://files.pythonhosted.org/packages/6b/b0/18f76bba336fa5aecf79d45dcd6c806c280ec44538b3c13671d49099fdd0/MarkupSafe-3.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:846ade7b71e3536c4e56b386c2a47adf5741d2d8b94ec9dc3e92e5e1ee1e2225", size = 12348, upload-time = "2024-10-18T15:21:14.822Z" }, + { url = "https://files.pythonhosted.org/packages/e0/25/dd5c0f6ac1311e9b40f4af06c78efde0f3b5cbf02502f8ef9501294c425b/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1c99d261bd2d5f6b59325c92c73df481e05e57f19837bdca8413b9eac4bd8028", size = 24149, upload-time = "2024-10-18T15:21:15.642Z" }, + { url = "https://files.pythonhosted.org/packages/f3/f0/89e7aadfb3749d0f52234a0c8c7867877876e0a20b60e2188e9850794c17/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e17c96c14e19278594aa4841ec148115f9c7615a47382ecb6b82bd8fea3ab0c8", size = 23118, upload-time = "2024-10-18T15:21:17.133Z" }, + { url = "https://files.pythonhosted.org/packages/d5/da/f2eeb64c723f5e3777bc081da884b414671982008c47dcc1873d81f625b6/MarkupSafe-3.0.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:88416bd1e65dcea10bc7569faacb2c20ce071dd1f87539ca2ab364bf6231393c", size = 22993, upload-time = "2024-10-18T15:21:18.064Z" }, + { url = "https://files.pythonhosted.org/packages/da/0e/1f32af846df486dce7c227fe0f2398dc7e2e51d4a370508281f3c1c5cddc/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:2181e67807fc2fa785d0592dc2d6206c019b9502410671cc905d132a92866557", size = 24178, upload-time = "2024-10-18T15:21:18.859Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f6/bb3ca0532de8086cbff5f06d137064c8410d10779c4c127e0e47d17c0b71/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:52305740fe773d09cffb16f8ed0427942901f00adedac82ec8b67752f58a1b22", size = 23319, upload-time = "2024-10-18T15:21:19.671Z" }, + { url = "https://files.pythonhosted.org/packages/a2/82/8be4c96ffee03c5b4a034e60a31294daf481e12c7c43ab8e34a1453ee48b/MarkupSafe-3.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad10d3ded218f1039f11a75f8091880239651b52e9bb592ca27de44eed242a48", size = 23352, upload-time = "2024-10-18T15:21:20.971Z" }, + { url = "https://files.pythonhosted.org/packages/51/ae/97827349d3fcffee7e184bdf7f41cd6b88d9919c80f0263ba7acd1bbcb18/MarkupSafe-3.0.2-cp312-cp312-win32.whl", hash = "sha256:0f4ca02bea9a23221c0182836703cbf8930c5e9454bacce27e767509fa286a30", size = 15097, upload-time = "2024-10-18T15:21:22.646Z" }, + { url = "https://files.pythonhosted.org/packages/c1/80/a61f99dc3a936413c3ee4e1eecac96c0da5ed07ad56fd975f1a9da5bc630/MarkupSafe-3.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:8e06879fc22a25ca47312fbe7c8264eb0b662f6db27cb2d3bbbc74b1df4b9b87", size = 15601, upload-time = "2024-10-18T15:21:23.499Z" }, +] + +[[package]] +name = "matplotlib" +version = "3.10.6" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "contourpy" }, + { name = "cycler" }, + { name = "fonttools" }, + { name = "kiwisolver" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pillow" }, + { name = "pyparsing" }, + { name = "python-dateutil" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a0/59/c3e6453a9676ffba145309a73c462bb407f4400de7de3f2b41af70720a3c/matplotlib-3.10.6.tar.gz", hash = "sha256:ec01b645840dd1996df21ee37f208cd8ba57644779fa20464010638013d3203c", size = 34804264, upload-time = "2025-08-30T00:14:25.137Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ea/1a/7042f7430055d567cc3257ac409fcf608599ab27459457f13772c2d9778b/matplotlib-3.10.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:31ca662df6a80bd426f871105fdd69db7543e28e73a9f2afe80de7e531eb2347", size = 8272404, upload-time = "2025-08-30T00:12:59.112Z" }, + { url = "https://files.pythonhosted.org/packages/a9/5d/1d5f33f5b43f4f9e69e6a5fe1fb9090936ae7bc8e2ff6158e7a76542633b/matplotlib-3.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1678bb61d897bb4ac4757b5ecfb02bfb3fddf7f808000fb81e09c510712fda75", size = 8128262, upload-time = "2025-08-30T00:13:01.141Z" }, + { url = "https://files.pythonhosted.org/packages/67/c3/135fdbbbf84e0979712df58e5e22b4f257b3f5e52a3c4aacf1b8abec0d09/matplotlib-3.10.6-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:56cd2d20842f58c03d2d6e6c1f1cf5548ad6f66b91e1e48f814e4fb5abd1cb95", size = 8697008, upload-time = "2025-08-30T00:13:03.24Z" }, + { url = "https://files.pythonhosted.org/packages/9c/be/c443ea428fb2488a3ea7608714b1bd85a82738c45da21b447dc49e2f8e5d/matplotlib-3.10.6-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:662df55604a2f9a45435566d6e2660e41efe83cd94f4288dfbf1e6d1eae4b0bb", size = 9530166, upload-time = "2025-08-30T00:13:05.951Z" }, + { url = "https://files.pythonhosted.org/packages/a9/35/48441422b044d74034aea2a3e0d1a49023f12150ebc58f16600132b9bbaf/matplotlib-3.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:08f141d55148cd1fc870c3387d70ca4df16dee10e909b3b038782bd4bda6ea07", size = 9593105, upload-time = "2025-08-30T00:13:08.356Z" }, + { url = "https://files.pythonhosted.org/packages/45/c3/994ef20eb4154ab84cc08d033834555319e4af970165e6c8894050af0b3c/matplotlib-3.10.6-cp312-cp312-win_amd64.whl", hash = "sha256:590f5925c2d650b5c9d813c5b3b5fc53f2929c3f8ef463e4ecfa7e052044fb2b", size = 8122784, upload-time = "2025-08-30T00:13:10.367Z" }, + { url = "https://files.pythonhosted.org/packages/57/b8/5c85d9ae0e40f04e71bedb053aada5d6bab1f9b5399a0937afb5d6b02d98/matplotlib-3.10.6-cp312-cp312-win_arm64.whl", hash = "sha256:f44c8d264a71609c79a78d50349e724f5d5fc3684ead7c2a473665ee63d868aa", size = 7992823, upload-time = "2025-08-30T00:13:12.24Z" }, +] + +[[package]] +name = "mlflow" +version = "3.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "alembic" }, + { name = "cryptography" }, + { name = "docker" }, + { name = "flask" }, + { name = "graphene" }, + { name = "gunicorn", marker = "sys_platform != 'win32'" }, + { name = "matplotlib" }, + { name = "mlflow-skinny" }, + { name = "mlflow-tracing" }, + { name = "numpy" }, + { name = "pandas" }, + { name = "pyarrow" }, + { name = "scikit-learn" }, + { name = "scipy" }, + { name = "sqlalchemy" }, + { name = "waitress", marker = "sys_platform == 'win32'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/eb/e3/05447a6c0a00b7ede38832d5c05904ccb83ae991d5792222849b25067b65/mlflow-3.3.2.tar.gz", hash = "sha256:ab9a5ffda0c05c6ba40e3c1ba4beef8f29fef0d61454f8c9485b54b1ec3e6894", size = 25788411, upload-time = "2025-08-27T13:16:17.622Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/16/57765db94704f001fe68a9d5545a883a8ccf6bb6629603c20736f2e388c4/mlflow-3.3.2-py3-none-any.whl", hash = "sha256:df2bfb11bf0ed3a39cf3cefd1a114ecdcd9c44291358b4b818e3bed50878b444", size = 26417769, upload-time = "2025-08-27T13:16:14.532Z" }, +] + +[[package]] +name = "mlflow-skinny" +version = "3.3.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "click" }, + { name = "cloudpickle" }, + { name = "databricks-sdk" }, + { name = "fastapi" }, + { name = "gitpython" }, + { name = "importlib-metadata" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-sdk" }, + { name = "packaging" }, + { name = "protobuf" }, + { name = "pydantic" }, + { name = "pyyaml" }, + { name = "requests" }, + { name = "sqlparse" }, + { name = "typing-extensions" }, + { name = "uvicorn" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/39/ef/94ce1a8db3ae3f94a73f6841fa804b3d2c48e177861cb0883ae367411996/mlflow_skinny-3.3.2.tar.gz", hash = "sha256:cf9ad0acb753bafdcdc60d9d18a7357f2627fb0c627ab3e3b97f632958a1008b", size = 1685895, upload-time = "2025-08-27T12:34:54.677Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/06/805b94473d6222249b87fa415d047b34db1e6eb24385783b6d1bc89f6ea8/mlflow_skinny-3.3.2-py3-none-any.whl", hash = "sha256:e565b08de309b9716d4f89362e0a9217d82a3c28d8d553988e0eaad6cbfe4eea", size = 2024570, upload-time = "2025-08-27T12:34:52.563Z" }, +] + +[[package]] +name = "mlflow-tracing" +version = "3.3.2" +source = { registry = 
"https://pypi.org/simple" } +dependencies = [ + { name = "cachetools" }, + { name = "databricks-sdk" }, + { name = "opentelemetry-api" }, + { name = "opentelemetry-sdk" }, + { name = "packaging" }, + { name = "protobuf" }, + { name = "pydantic" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a2/23/bf70405a194906b4fa809be79dfa4486406385f5f578157d947fdc741ef5/mlflow_tracing-3.3.2.tar.gz", hash = "sha256:003ad9c66f884e8e8bb2f5d219b5be9bcd41bb65d77a7264d8aaada853d64050", size = 926718, upload-time = "2025-08-27T12:32:14.801Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5e/86/396a17af4e994c7ffa65609739baddc17f4436aec9511478816e157a1bda/mlflow_tracing-3.3.2-py3-none-any.whl", hash = "sha256:9a3175fb3b069c9f541c7a60a663f482b3fcb4ca8f3583da3fdf036a50179e05", size = 1120520, upload-time = "2025-08-27T12:32:13.539Z" }, +] + +[[package]] +name = "narwhals" +version = "1.43.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/37/d9/ec1bd9f85d30de741b281ef24dabbf029122b638ea19456ffa1b1d862205/narwhals-1.43.0.tar.gz", hash = "sha256:5a28119401fccb4d344704f806438a983bb0a5b3f4a638760d25b1d521a18a79", size = 496455, upload-time = "2025-06-16T15:34:08.467Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/1e/8d/07b892f237491e03328de4c69c17ed8b99a5b6faf84575ca06b15cbf2674/narwhals-1.43.0-py3-none-any.whl", hash = "sha256:7accb0eae172f5697ada3635f46221dfcc98e9419f694df628c0745526d5c514", size = 362730, upload-time = "2025-06-16T15:34:06.455Z" }, +] + +[[package]] +name = "numcodecs" +version = "0.15.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "deprecated" }, + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/63/fc/bb532969eb8236984ba65e4f0079a7da885b8ac0ce1f0835decbb3938a62/numcodecs-0.15.1.tar.gz", hash = "sha256:eeed77e4d6636641a2cc605fbc6078c7a8f2cc40f3dfa2b3f61e52e6091b04ff", size = 6267275, upload-time = "2025-02-10T10:23:33.254Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/7e/f12fc32d3beedc6a8f1ec69ea0ba72e93cb99c0350feed2cff5d04679bc3/numcodecs-0.15.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:b0a9d9cd29a0088220682dda4a9898321f7813ff7802be2bbb545f6e3d2f10ff", size = 1691889, upload-time = "2025-02-10T10:23:12.934Z" }, + { url = "https://files.pythonhosted.org/packages/81/38/88e40d40288b73c3b3a390ed5614a34b0661d00255bdd4cfb91c32101364/numcodecs-0.15.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a34f0fe5e5f3b837bbedbeb98794a6d4a12eeeef8d4697b523905837900b5e1c", size = 1189149, upload-time = "2025-02-10T10:23:15.803Z" }, + { url = "https://files.pythonhosted.org/packages/28/7d/7527d9180bc76011d6163c848c9cf02cd28a623c2c66cf543e1e86de7c5e/numcodecs-0.15.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3a09e22140f2c691f7df26303ff8fa2dadcf26d7d0828398c0bc09b69e5efa3", size = 8879163, upload-time = "2025-02-10T10:23:18.582Z" }, + { url = "https://files.pythonhosted.org/packages/ab/bc/b6c3cde91c754860a3467a8c058dcf0b1a5ca14d82b1c5397c700cf8b1eb/numcodecs-0.15.1-cp312-cp312-win_amd64.whl", hash = "sha256:daed6066ffcf40082da847d318b5ab6123d69ceb433ba603cb87c323a541a8bc", size = 836785, upload-time = "2025-02-10T10:23:22.314Z" }, +] + +[[package]] +name = "numpy" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = 
"https://files.pythonhosted.org/packages/f3/db/8e12381333aea300890829a0a36bfa738cac95475d88982d538725143fd9/numpy-2.3.0.tar.gz", hash = "sha256:581f87f9e9e9db2cba2141400e160e9dd644ee248788d6f90636eeb8fd9260a6", size = 20382813, upload-time = "2025-06-07T14:54:32.608Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/89/59/9df493df81ac6f76e9f05cdbe013cdb0c9a37b434f6e594f5bd25e278908/numpy-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:389b85335838155a9076e9ad7f8fdba0827496ec2d2dc32ce69ce7898bde03ba", size = 20897025, upload-time = "2025-06-07T14:40:33.558Z" }, + { url = "https://files.pythonhosted.org/packages/2f/86/4ff04335901d6cf3a6bb9c748b0097546ae5af35e455ae9b962ebff4ecd7/numpy-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:9498f60cd6bb8238d8eaf468a3d5bb031d34cd12556af53510f05fcf581c1b7e", size = 14129882, upload-time = "2025-06-07T14:40:55.034Z" }, + { url = "https://files.pythonhosted.org/packages/71/8d/a942cd4f959de7f08a79ab0c7e6cecb7431d5403dce78959a726f0f57aa1/numpy-2.3.0-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:622a65d40d8eb427d8e722fd410ac3ad4958002f109230bc714fa551044ebae2", size = 5110181, upload-time = "2025-06-07T14:41:04.4Z" }, + { url = "https://files.pythonhosted.org/packages/86/5d/45850982efc7b2c839c5626fb67fbbc520d5b0d7c1ba1ae3651f2f74c296/numpy-2.3.0-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:b9446d9d8505aadadb686d51d838f2b6688c9e85636a0c3abaeb55ed54756459", size = 6647581, upload-time = "2025-06-07T14:41:14.695Z" }, + { url = "https://files.pythonhosted.org/packages/1a/c0/c871d4a83f93b00373d3eebe4b01525eee8ef10b623a335ec262b58f4dc1/numpy-2.3.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:50080245365d75137a2bf46151e975de63146ae6d79f7e6bd5c0e85c9931d06a", size = 14262317, upload-time = "2025-06-07T14:41:35.862Z" }, + { url = "https://files.pythonhosted.org/packages/b7/f6/bc47f5fa666d5ff4145254f9e618d56e6a4ef9b874654ca74c19113bb538/numpy-2.3.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c24bb4113c66936eeaa0dc1e47c74770453d34f46ee07ae4efd853a2ed1ad10a", size = 16633919, upload-time = "2025-06-07T14:42:00.622Z" }, + { url = "https://files.pythonhosted.org/packages/f5/b4/65f48009ca0c9b76df5f404fccdea5a985a1bb2e34e97f21a17d9ad1a4ba/numpy-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4d8d294287fdf685281e671886c6dcdf0291a7c19db3e5cb4178d07ccf6ecc67", size = 15567651, upload-time = "2025-06-07T14:42:24.429Z" }, + { url = "https://files.pythonhosted.org/packages/f1/62/5367855a2018578e9334ed08252ef67cc302e53edc869666f71641cad40b/numpy-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6295f81f093b7f5769d1728a6bd8bf7466de2adfa771ede944ce6711382b89dc", size = 18361723, upload-time = "2025-06-07T14:42:51.167Z" }, + { url = "https://files.pythonhosted.org/packages/d4/75/5baed8cd867eabee8aad1e74d7197d73971d6a3d40c821f1848b8fab8b84/numpy-2.3.0-cp312-cp312-win32.whl", hash = "sha256:e6648078bdd974ef5d15cecc31b0c410e2e24178a6e10bf511e0557eed0f2570", size = 6318285, upload-time = "2025-06-07T14:43:02.052Z" }, + { url = "https://files.pythonhosted.org/packages/bc/49/d5781eaa1a15acb3b3a3f49dc9e2ff18d92d0ce5c2976f4ab5c0a7360250/numpy-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:0898c67a58cdaaf29994bc0e2c65230fd4de0ac40afaf1584ed0b02cd74c6fdd", size = 12732594, upload-time = "2025-06-07T14:43:21.071Z" }, + { url = "https://files.pythonhosted.org/packages/c2/1c/6d343e030815c7c97a1f9fbad00211b47717c7fe446834c224bd5311e6f1/numpy-2.3.0-cp312-cp312-win_arm64.whl", hash = 
"sha256:bd8df082b6c4695753ad6193018c05aac465d634834dca47a3ae06d4bb22d9ea", size = 9891498, upload-time = "2025-06-07T14:43:36.332Z" }, +] + +[[package]] +name = "omegaconf" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "antlr4-python3-runtime" }, + { name = "pyyaml" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/09/48/6388f1bb9da707110532cb70ec4d2822858ddfb44f1cdf1233c20a80ea4b/omegaconf-2.3.0.tar.gz", hash = "sha256:d5d4b6d29955cc50ad50c46dc269bcd92c6e00f5f90d23ab5fee7bfca4ba4cc7", size = 3298120, upload-time = "2022-12-08T20:59:22.753Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e3/94/1843518e420fa3ed6919835845df698c7e27e183cb997394e4a670973a65/omegaconf-2.3.0-py3-none-any.whl", hash = "sha256:7b4df175cdb08ba400f45cae3bdcae7ba8365db4d165fc65fd04b050ab63b46b", size = 79500, upload-time = "2022-12-08T20:59:19.686Z" }, +] + +[[package]] +name = "opentelemetry-api" +version = "1.36.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "importlib-metadata" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/27/d2/c782c88b8afbf961d6972428821c302bd1e9e7bc361352172f0ca31296e2/opentelemetry_api-1.36.0.tar.gz", hash = "sha256:9a72572b9c416d004d492cbc6e61962c0501eaf945ece9b5a0f56597d8348aa0", size = 64780, upload-time = "2025-07-29T15:12:06.02Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bb/ee/6b08dde0a022c463b88f55ae81149584b125a42183407dc1045c486cc870/opentelemetry_api-1.36.0-py3-none-any.whl", hash = "sha256:02f20bcacf666e1333b6b1f04e647dc1d5111f86b8e510238fcc56d7762cda8c", size = 65564, upload-time = "2025-07-29T15:11:47.998Z" }, +] + +[[package]] +name = "opentelemetry-sdk" +version = "1.36.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "opentelemetry-semantic-conventions" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/85/8567a966b85a2d3f971c4d42f781c305b2b91c043724fa08fd37d158e9dc/opentelemetry_sdk-1.36.0.tar.gz", hash = "sha256:19c8c81599f51b71670661ff7495c905d8fdf6976e41622d5245b791b06fa581", size = 162557, upload-time = "2025-07-29T15:12:16.76Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/0b/59/7bed362ad1137ba5886dac8439e84cd2df6d087be7c09574ece47ae9b22c/opentelemetry_sdk-1.36.0-py3-none-any.whl", hash = "sha256:19fe048b42e98c5c1ffe85b569b7073576ad4ce0bcb6e9b4c6a39e890a6c45fb", size = 119995, upload-time = "2025-07-29T15:12:03.181Z" }, +] + +[[package]] +name = "opentelemetry-semantic-conventions" +version = "0.57b0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "opentelemetry-api" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7e/31/67dfa252ee88476a29200b0255bda8dfc2cf07b56ad66dc9a6221f7dc787/opentelemetry_semantic_conventions-0.57b0.tar.gz", hash = "sha256:609a4a79c7891b4620d64c7aac6898f872d790d75f22019913a660756f27ff32", size = 124225, upload-time = "2025-07-29T15:12:17.873Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/05/75/7d591371c6c39c73de5ce5da5a2cc7b72d1d1cd3f8f4638f553c01c37b11/opentelemetry_semantic_conventions-0.57b0-py3-none-any.whl", hash = "sha256:757f7e76293294f124c827e514c2a3144f191ef175b069ce8d1211e1e38e9e78", size = 201627, upload-time = "2025-07-29T15:12:04.174Z" }, +] + +[[package]] +name = "packaging" +version = "25.0" +source = { 
registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" }, +] + +[[package]] +name = "pandas" +version = "2.3.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "python-dateutil" }, + { name = "pytz" }, + { name = "tzdata" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/51/48f713c4c728d7c55ef7444ba5ea027c26998d96d1a40953b346438602fc/pandas-2.3.0.tar.gz", hash = "sha256:34600ab34ebf1131a7613a260a61dbe8b62c188ec0ea4c296da7c9a06b004133", size = 4484490, upload-time = "2025-06-05T03:27:54.133Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/94/46/24192607058dd607dbfacdd060a2370f6afb19c2ccb617406469b9aeb8e7/pandas-2.3.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2eb4728a18dcd2908c7fccf74a982e241b467d178724545a48d0caf534b38ebf", size = 11573865, upload-time = "2025-06-05T03:26:46.774Z" }, + { url = "https://files.pythonhosted.org/packages/9f/cc/ae8ea3b800757a70c9fdccc68b67dc0280a6e814efcf74e4211fd5dea1ca/pandas-2.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b9d8c3187be7479ea5c3d30c32a5d73d62a621166675063b2edd21bc47614027", size = 10702154, upload-time = "2025-06-05T16:50:14.439Z" }, + { url = "https://files.pythonhosted.org/packages/d8/ba/a7883d7aab3d24c6540a2768f679e7414582cc389876d469b40ec749d78b/pandas-2.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ff730713d4c4f2f1c860e36c005c7cefc1c7c80c21c0688fd605aa43c9fcf09", size = 11262180, upload-time = "2025-06-05T16:50:17.453Z" }, + { url = "https://files.pythonhosted.org/packages/01/a5/931fc3ad333d9d87b10107d948d757d67ebcfc33b1988d5faccc39c6845c/pandas-2.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba24af48643b12ffe49b27065d3babd52702d95ab70f50e1b34f71ca703e2c0d", size = 11991493, upload-time = "2025-06-05T03:26:51.813Z" }, + { url = "https://files.pythonhosted.org/packages/d7/bf/0213986830a92d44d55153c1d69b509431a972eb73f204242988c4e66e86/pandas-2.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:404d681c698e3c8a40a61d0cd9412cc7364ab9a9cc6e144ae2992e11a2e77a20", size = 12470733, upload-time = "2025-06-06T00:00:18.651Z" }, + { url = "https://files.pythonhosted.org/packages/a4/0e/21eb48a3a34a7d4bac982afc2c4eb5ab09f2d988bdf29d92ba9ae8e90a79/pandas-2.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6021910b086b3ca756755e86ddc64e0ddafd5e58e076c72cb1585162e5ad259b", size = 13212406, upload-time = "2025-06-05T03:26:55.992Z" }, + { url = "https://files.pythonhosted.org/packages/1f/d9/74017c4eec7a28892d8d6e31ae9de3baef71f5a5286e74e6b7aad7f8c837/pandas-2.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:094e271a15b579650ebf4c5155c05dcd2a14fd4fdd72cf4854b2f7ad31ea30be", size = 10976199, upload-time = "2025-06-05T03:26:59.594Z" }, +] + +[[package]] +name = "partd" +version = "1.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "locket" }, + { name = "toolz" }, +] 
+sdist = { url = "https://files.pythonhosted.org/packages/b2/3a/3f06f34820a31257ddcabdfafc2672c5816be79c7e353b02c1f318daa7d4/partd-1.4.2.tar.gz", hash = "sha256:d022c33afbdc8405c226621b015e8067888173d85f7f5ecebb3cafed9a20f02c", size = 21029, upload-time = "2024-05-06T19:51:41.945Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/71/e7/40fb618334dcdf7c5a316c0e7343c5cd82d3d866edc100d98e29bc945ecd/partd-1.4.2-py3-none-any.whl", hash = "sha256:978e4ac767ec4ba5b86c6eaa52e5a2a3bc748a2ca839e8cc798f1cc6ce6efb0f", size = 18905, upload-time = "2024-05-06T19:51:39.271Z" }, +] + +[[package]] +name = "pillow" +version = "11.2.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/af/cb/bb5c01fcd2a69335b86c22142b2bccfc3464087efb7fd382eee5ffc7fdf7/pillow-11.2.1.tar.gz", hash = "sha256:a64dd61998416367b7ef979b73d3a85853ba9bec4c2925f74e588879a58716b6", size = 47026707, upload-time = "2025-04-12T17:50:03.289Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/40/052610b15a1b8961f52537cc8326ca6a881408bc2bdad0d852edeb6ed33b/pillow-11.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:78afba22027b4accef10dbd5eed84425930ba41b3ea0a86fa8d20baaf19d807f", size = 3190185, upload-time = "2025-04-12T17:48:00.417Z" }, + { url = "https://files.pythonhosted.org/packages/e5/7e/b86dbd35a5f938632093dc40d1682874c33dcfe832558fc80ca56bfcb774/pillow-11.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:78092232a4ab376a35d68c4e6d5e00dfd73454bd12b230420025fbe178ee3b0b", size = 3030306, upload-time = "2025-04-12T17:48:02.391Z" }, + { url = "https://files.pythonhosted.org/packages/a4/5c/467a161f9ed53e5eab51a42923c33051bf8d1a2af4626ac04f5166e58e0c/pillow-11.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25a5f306095c6780c52e6bbb6109624b95c5b18e40aab1c3041da3e9e0cd3e2d", size = 4416121, upload-time = "2025-04-12T17:48:04.554Z" }, + { url = "https://files.pythonhosted.org/packages/62/73/972b7742e38ae0e2ac76ab137ca6005dcf877480da0d9d61d93b613065b4/pillow-11.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c7b29dbd4281923a2bfe562acb734cee96bbb129e96e6972d315ed9f232bef4", size = 4501707, upload-time = "2025-04-12T17:48:06.831Z" }, + { url = "https://files.pythonhosted.org/packages/e4/3a/427e4cb0b9e177efbc1a84798ed20498c4f233abde003c06d2650a6d60cb/pillow-11.2.1-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:3e645b020f3209a0181a418bffe7b4a93171eef6c4ef6cc20980b30bebf17b7d", size = 4522921, upload-time = "2025-04-12T17:48:09.229Z" }, + { url = "https://files.pythonhosted.org/packages/fe/7c/d8b1330458e4d2f3f45d9508796d7caf0c0d3764c00c823d10f6f1a3b76d/pillow-11.2.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:b2dbea1012ccb784a65349f57bbc93730b96e85b42e9bf7b01ef40443db720b4", size = 4612523, upload-time = "2025-04-12T17:48:11.631Z" }, + { url = "https://files.pythonhosted.org/packages/b3/2f/65738384e0b1acf451de5a573d8153fe84103772d139e1e0bdf1596be2ea/pillow-11.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:da3104c57bbd72948d75f6a9389e6727d2ab6333c3617f0a89d72d4940aa0443", size = 4587836, upload-time = "2025-04-12T17:48:13.592Z" }, + { url = "https://files.pythonhosted.org/packages/6a/c5/e795c9f2ddf3debb2dedd0df889f2fe4b053308bb59a3cc02a0cd144d641/pillow-11.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:598174aef4589af795f66f9caab87ba4ff860ce08cd5bb447c6fc553ffee603c", size = 4669390, upload-time = "2025-04-12T17:48:15.938Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/ae/ca0099a3995976a9fce2f423166f7bff9b12244afdc7520f6ed38911539a/pillow-11.2.1-cp312-cp312-win32.whl", hash = "sha256:1d535df14716e7f8776b9e7fee118576d65572b4aad3ed639be9e4fa88a1cad3", size = 2332309, upload-time = "2025-04-12T17:48:17.885Z" }, + { url = "https://files.pythonhosted.org/packages/7c/18/24bff2ad716257fc03da964c5e8f05d9790a779a8895d6566e493ccf0189/pillow-11.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:14e33b28bf17c7a38eede290f77db7c664e4eb01f7869e37fa98a5aa95978941", size = 2676768, upload-time = "2025-04-12T17:48:19.655Z" }, + { url = "https://files.pythonhosted.org/packages/da/bb/e8d656c9543276517ee40184aaa39dcb41e683bca121022f9323ae11b39d/pillow-11.2.1-cp312-cp312-win_arm64.whl", hash = "sha256:21e1470ac9e5739ff880c211fc3af01e3ae505859392bf65458c224d0bf283eb", size = 2415087, upload-time = "2025-04-12T17:48:21.991Z" }, +] + +[[package]] +name = "plotly" +version = "6.1.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "narwhals" }, + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/77/431447616eda6a432dc3ce541b3f808ecb8803ea3d4ab2573b67f8eb4208/plotly-6.1.2.tar.gz", hash = "sha256:4fdaa228926ba3e3a213f4d1713287e69dcad1a7e66cf2025bd7d7026d5014b4", size = 7662971, upload-time = "2025-05-27T20:21:52.56Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bf/6f/759d5da0517547a5d38aabf05d04d9f8adf83391d2c7fc33f904417d3ba2/plotly-6.1.2-py3-none-any.whl", hash = "sha256:f1548a8ed9158d59e03d7fed548c7db5549f3130d9ae19293c8638c202648f6d", size = 16265530, upload-time = "2025-05-27T20:21:46.6Z" }, +] + +[[package]] +name = "pluggy" +version = "1.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" }, +] + +[[package]] +name = "polars" +version = "1.30.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/82/b6/8dbdf626c0705a57f052708c9fc0860ffc2aa97955930d5faaf6a66fcfd3/polars-1.30.0.tar.gz", hash = "sha256:dfe94ae84a5efd9ba74e616e3e125b24ca155494a931890a8f17480737c4db45", size = 4668318, upload-time = "2025-05-21T13:33:24.175Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/40/48/e9b2cb379abcc9f7aff2e701098fcdb9fe6d85dc4ad4cec7b35d39c70951/polars-1.30.0-cp39-abi3-macosx_10_12_x86_64.whl", hash = "sha256:4c33bc97c29b7112f0e689a2f8a33143973a3ff466c70b25c7fd1880225de6dd", size = 35704342, upload-time = "2025-05-21T13:32:22.996Z" }, + { url = "https://files.pythonhosted.org/packages/36/ca/f545f61282f75eea4dfde4db2944963dcd59abd50c20e33a1c894da44dad/polars-1.30.0-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:e3d05914c364b8e39a5b10dcf97e84d76e516b3b1693880bf189a93aab3ca00d", size = 32459857, upload-time = "2025-05-21T13:32:27.728Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/20/e018cd87d7cb6f8684355f31f4e193222455a6e8f7b942f4a2934f5969c7/polars-1.30.0-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a52af3862082b868c1febeae650af8ae8a2105d2cb28f0449179a7b44f54ccf", size = 36267243, upload-time = "2025-05-21T13:32:31.796Z" }, + { url = "https://files.pythonhosted.org/packages/cb/e7/b88b973021be07b13d91b9301cc14392c994225ef5107a32a8ffd3fd6424/polars-1.30.0-cp39-abi3-manylinux_2_24_aarch64.whl", hash = "sha256:ffb3ef133454275d4254442257c5f71dd6e393ce365c97997dadeb6fa9d6d4b5", size = 33416871, upload-time = "2025-05-21T13:32:35.077Z" }, + { url = "https://files.pythonhosted.org/packages/dd/7c/d46d4381adeac537b8520b653dc30cb8b7edbf59883d71fbb989e9005de1/polars-1.30.0-cp39-abi3-win_amd64.whl", hash = "sha256:c26b633a9bd530c5fc09d317fca3bb3e16c772bd7df7549a9d8ec1934773cc5d", size = 36363630, upload-time = "2025-05-21T13:32:38.286Z" }, + { url = "https://files.pythonhosted.org/packages/fb/b5/5056d0c12aadb57390d0627492bef8b1abf3549474abb9ae0fd4e2bfa885/polars-1.30.0-cp39-abi3-win_arm64.whl", hash = "sha256:476f1bde65bc7b4d9f80af370645c2981b5798d67c151055e58534e89e96f2a8", size = 32643590, upload-time = "2025-05-21T13:32:42.107Z" }, +] + +[[package]] +name = "protobuf" +version = "6.31.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/52/f3/b9655a711b32c19720253f6f06326faf90580834e2e83f840472d752bc8b/protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a", size = 441797, upload-time = "2025-05-28T19:25:54.947Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/f3/6f/6ab8e4bf962fd5570d3deaa2d5c38f0a363f57b4501047b5ebeb83ab1125/protobuf-6.31.1-cp310-abi3-win32.whl", hash = "sha256:7fa17d5a29c2e04b7d90e5e32388b8bfd0e7107cd8e616feef7ed3fa6bdab5c9", size = 423603, upload-time = "2025-05-28T19:25:41.198Z" }, + { url = "https://files.pythonhosted.org/packages/44/3a/b15c4347dd4bf3a1b0ee882f384623e2063bb5cf9fa9d57990a4f7df2fb6/protobuf-6.31.1-cp310-abi3-win_amd64.whl", hash = "sha256:426f59d2964864a1a366254fa703b8632dcec0790d8862d30034d8245e1cd447", size = 435283, upload-time = "2025-05-28T19:25:44.275Z" }, + { url = "https://files.pythonhosted.org/packages/6a/c9/b9689a2a250264a84e66c46d8862ba788ee7a641cdca39bccf64f59284b7/protobuf-6.31.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:6f1227473dc43d44ed644425268eb7c2e488ae245d51c6866d19fe158e207402", size = 425604, upload-time = "2025-05-28T19:25:45.702Z" }, + { url = "https://files.pythonhosted.org/packages/76/a1/7a5a94032c83375e4fe7e7f56e3976ea6ac90c5e85fac8576409e25c39c3/protobuf-6.31.1-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:a40fc12b84c154884d7d4c4ebd675d5b3b5283e155f324049ae396b95ddebc39", size = 322115, upload-time = "2025-05-28T19:25:47.128Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b1/b59d405d64d31999244643d88c45c8241c58f17cc887e73bcb90602327f8/protobuf-6.31.1-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:4ee898bf66f7a8b0bd21bce523814e6fbd8c6add948045ce958b73af7e8878c6", size = 321070, upload-time = "2025-05-28T19:25:50.036Z" }, + { url = "https://files.pythonhosted.org/packages/f7/af/ab3c51ab7507a7325e98ffe691d9495ee3d3aa5f589afad65ec920d39821/protobuf-6.31.1-py3-none-any.whl", hash = "sha256:720a6c7e6b77288b85063569baae8536671b39f15cc22037ec7045658d80489e", size = 168724, upload-time = "2025-05-28T19:25:53.926Z" }, +] + +[[package]] +name = "pyarrow" +version = "20.0.0" +source = { 
registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/ee/a7810cb9f3d6e9238e61d312076a9859bf3668fd21c69744de9532383912/pyarrow-20.0.0.tar.gz", hash = "sha256:febc4a913592573c8d5805091a6c2b5064c8bd6e002131f01061797d91c783c1", size = 1125187, upload-time = "2025-04-27T12:34:23.264Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a1/d6/0c10e0d54f6c13eb464ee9b67a68b8c71bcf2f67760ef5b6fbcddd2ab05f/pyarrow-20.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:75a51a5b0eef32727a247707d4755322cb970be7e935172b6a3a9f9ae98404ba", size = 30815067, upload-time = "2025-04-27T12:29:44.384Z" }, + { url = "https://files.pythonhosted.org/packages/7e/e2/04e9874abe4094a06fd8b0cbb0f1312d8dd7d707f144c2ec1e5e8f452ffa/pyarrow-20.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:211d5e84cecc640c7a3ab900f930aaff5cd2702177e0d562d426fb7c4f737781", size = 32297128, upload-time = "2025-04-27T12:29:52.038Z" }, + { url = "https://files.pythonhosted.org/packages/31/fd/c565e5dcc906a3b471a83273039cb75cb79aad4a2d4a12f76cc5ae90a4b8/pyarrow-20.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ba3cf4182828be7a896cbd232aa8dd6a31bd1f9e32776cc3796c012855e1199", size = 41334890, upload-time = "2025-04-27T12:29:59.452Z" }, + { url = "https://files.pythonhosted.org/packages/af/a9/3bdd799e2c9b20c1ea6dc6fa8e83f29480a97711cf806e823f808c2316ac/pyarrow-20.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c3a01f313ffe27ac4126f4c2e5ea0f36a5fc6ab51f8726cf41fee4b256680bd", size = 42421775, upload-time = "2025-04-27T12:30:06.875Z" }, + { url = "https://files.pythonhosted.org/packages/10/f7/da98ccd86354c332f593218101ae56568d5dcedb460e342000bd89c49cc1/pyarrow-20.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a2791f69ad72addd33510fec7bb14ee06c2a448e06b649e264c094c5b5f7ce28", size = 40687231, upload-time = "2025-04-27T12:30:13.954Z" }, + { url = "https://files.pythonhosted.org/packages/bb/1b/2168d6050e52ff1e6cefc61d600723870bf569cbf41d13db939c8cf97a16/pyarrow-20.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:4250e28a22302ce8692d3a0e8ec9d9dde54ec00d237cff4dfa9c1fbf79e472a8", size = 42295639, upload-time = "2025-04-27T12:30:21.949Z" }, + { url = "https://files.pythonhosted.org/packages/b2/66/2d976c0c7158fd25591c8ca55aee026e6d5745a021915a1835578707feb3/pyarrow-20.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:89e030dc58fc760e4010148e6ff164d2f44441490280ef1e97a542375e41058e", size = 42908549, upload-time = "2025-04-27T12:30:29.551Z" }, + { url = "https://files.pythonhosted.org/packages/31/a9/dfb999c2fc6911201dcbf348247f9cc382a8990f9ab45c12eabfd7243a38/pyarrow-20.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6102b4864d77102dbbb72965618e204e550135a940c2534711d5ffa787df2a5a", size = 44557216, upload-time = "2025-04-27T12:30:36.977Z" }, + { url = "https://files.pythonhosted.org/packages/a0/8e/9adee63dfa3911be2382fb4d92e4b2e7d82610f9d9f668493bebaa2af50f/pyarrow-20.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:96d6a0a37d9c98be08f5ed6a10831d88d52cac7b13f5287f1e0f625a0de8062b", size = 25660496, upload-time = "2025-04-27T12:30:42.809Z" }, +] + +[[package]] +name = "pyasn1" +version = "0.6.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 
145322, upload-time = "2024-09-10T22:41:42.55Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" }, +] + +[[package]] +name = "pyasn1-modules" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e9/e6/78ebbb10a8c8e4b61a59249394a4a594c1a7af95593dc933a349c8d00964/pyasn1_modules-0.4.2.tar.gz", hash = "sha256:677091de870a80aae844b1ca6134f54652fa2c8c5a52aa396440ac3106e941e6", size = 307892, upload-time = "2025-03-28T02:41:22.17Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/47/8d/d529b5d697919ba8c11ad626e835d4039be708a35b0d22de83a269a6682c/pyasn1_modules-0.4.2-py3-none-any.whl", hash = "sha256:29253a9207ce32b64c3ac6600edc75368f98473906e8fd1043bd6b5b1de2c14a", size = 181259, upload-time = "2025-03-28T02:41:19.028Z" }, +] + +[[package]] +name = "pycparser" +version = "2.22" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/b2/31537cf4b1ca988837256c910a668b553fceb8f069bedc4b1c826024b52c/pycparser-2.22.tar.gz", hash = "sha256:491c8be9c040f5390f5bf44a5b07752bd07f56edf992381b05c701439eec10f6", size = 172736, upload-time = "2024-03-30T13:22:22.564Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/13/a3/a812df4e2dd5696d1f351d58b8fe16a405b234ad2886a0dab9183fb78109/pycparser-2.22-py3-none-any.whl", hash = "sha256:c3702b6d3dd8c7abc1afa565d7e63d53a1d0bd86cdc24edd75470f4de499cfcc", size = 117552, upload-time = "2024-03-30T13:22:20.476Z" }, +] + +[[package]] +name = "pydantic" +version = "2.11.7" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "annotated-types" }, + { name = "pydantic-core" }, + { name = "typing-extensions" }, + { name = "typing-inspection" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/00/dd/4325abf92c39ba8623b5af936ddb36ffcfe0beae70405d456ab1fb2f5b8c/pydantic-2.11.7.tar.gz", hash = "sha256:d989c3c6cb79469287b1569f7447a17848c998458d49ebe294e975b9baf0f0db", size = 788350, upload-time = "2025-06-14T08:33:17.137Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6a/c0/ec2b1c8712ca690e5d61979dee872603e92b8a32f94cc1b72d53beab008a/pydantic-2.11.7-py3-none-any.whl", hash = "sha256:dde5df002701f6de26248661f6835bbe296a47bf73990135c7d07ce741b9623b", size = 444782, upload-time = "2025-06-14T08:33:14.905Z" }, +] + +[[package]] +name = "pydantic-core" +version = "2.33.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ad/88/5f2260bdfae97aabf98f1778d43f69574390ad787afb646292a638c923d4/pydantic_core-2.33.2.tar.gz", hash = "sha256:7cb8bc3605c29176e1b105350d2e6474142d7c1bd1d9327c4a9bdb46bf827acc", size = 435195, upload-time = "2025-04-23T18:33:52.104Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/18/8a/2b41c97f554ec8c71f2a8a5f85cb56a8b0956addfe8b0efb5b3d77e8bdc3/pydantic_core-2.33.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a7ec89dc587667f22b6a0b6579c249fca9026ce7c333fc142ba42411fa243cdc", size = 2009000, upload-time = "2025-04-23T18:31:25.863Z" }, + { url = 
"https://files.pythonhosted.org/packages/a1/02/6224312aacb3c8ecbaa959897af57181fb6cf3a3d7917fd44d0f2917e6f2/pydantic_core-2.33.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3c6db6e52c6d70aa0d00d45cdb9b40f0433b96380071ea80b09277dba021ddf7", size = 1847996, upload-time = "2025-04-23T18:31:27.341Z" }, + { url = "https://files.pythonhosted.org/packages/d6/46/6dcdf084a523dbe0a0be59d054734b86a981726f221f4562aed313dbcb49/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e61206137cbc65e6d5256e1166f88331d3b6238e082d9f74613b9b765fb9025", size = 1880957, upload-time = "2025-04-23T18:31:28.956Z" }, + { url = "https://files.pythonhosted.org/packages/ec/6b/1ec2c03837ac00886ba8160ce041ce4e325b41d06a034adbef11339ae422/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eb8c529b2819c37140eb51b914153063d27ed88e3bdc31b71198a198e921e011", size = 1964199, upload-time = "2025-04-23T18:31:31.025Z" }, + { url = "https://files.pythonhosted.org/packages/2d/1d/6bf34d6adb9debd9136bd197ca72642203ce9aaaa85cfcbfcf20f9696e83/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c52b02ad8b4e2cf14ca7b3d918f3eb0ee91e63b3167c32591e57c4317e134f8f", size = 2120296, upload-time = "2025-04-23T18:31:32.514Z" }, + { url = "https://files.pythonhosted.org/packages/e0/94/2bd0aaf5a591e974b32a9f7123f16637776c304471a0ab33cf263cf5591a/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:96081f1605125ba0855dfda83f6f3df5ec90c61195421ba72223de35ccfb2f88", size = 2676109, upload-time = "2025-04-23T18:31:33.958Z" }, + { url = "https://files.pythonhosted.org/packages/f9/41/4b043778cf9c4285d59742281a769eac371b9e47e35f98ad321349cc5d61/pydantic_core-2.33.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f57a69461af2a5fa6e6bbd7a5f60d3b7e6cebb687f55106933188e79ad155c1", size = 2002028, upload-time = "2025-04-23T18:31:39.095Z" }, + { url = "https://files.pythonhosted.org/packages/cb/d5/7bb781bf2748ce3d03af04d5c969fa1308880e1dca35a9bd94e1a96a922e/pydantic_core-2.33.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:572c7e6c8bb4774d2ac88929e3d1f12bc45714ae5ee6d9a788a9fb35e60bb04b", size = 2100044, upload-time = "2025-04-23T18:31:41.034Z" }, + { url = "https://files.pythonhosted.org/packages/fe/36/def5e53e1eb0ad896785702a5bbfd25eed546cdcf4087ad285021a90ed53/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:db4b41f9bd95fbe5acd76d89920336ba96f03e149097365afe1cb092fceb89a1", size = 2058881, upload-time = "2025-04-23T18:31:42.757Z" }, + { url = "https://files.pythonhosted.org/packages/01/6c/57f8d70b2ee57fc3dc8b9610315949837fa8c11d86927b9bb044f8705419/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:fa854f5cf7e33842a892e5c73f45327760bc7bc516339fda888c75ae60edaeb6", size = 2227034, upload-time = "2025-04-23T18:31:44.304Z" }, + { url = "https://files.pythonhosted.org/packages/27/b9/9c17f0396a82b3d5cbea4c24d742083422639e7bb1d5bf600e12cb176a13/pydantic_core-2.33.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:5f483cfb75ff703095c59e365360cb73e00185e01aaea067cd19acffd2ab20ea", size = 2234187, upload-time = "2025-04-23T18:31:45.891Z" }, + { url = "https://files.pythonhosted.org/packages/b0/6a/adf5734ffd52bf86d865093ad70b2ce543415e0e356f6cacabbc0d9ad910/pydantic_core-2.33.2-cp312-cp312-win32.whl", hash = 
"sha256:9cb1da0f5a471435a7bc7e439b8a728e8b61e59784b2af70d7c169f8dd8ae290", size = 1892628, upload-time = "2025-04-23T18:31:47.819Z" }, + { url = "https://files.pythonhosted.org/packages/43/e4/5479fecb3606c1368d496a825d8411e126133c41224c1e7238be58b87d7e/pydantic_core-2.33.2-cp312-cp312-win_amd64.whl", hash = "sha256:f941635f2a3d96b2973e867144fde513665c87f13fe0e193c158ac51bfaaa7b2", size = 1955866, upload-time = "2025-04-23T18:31:49.635Z" }, + { url = "https://files.pythonhosted.org/packages/0d/24/8b11e8b3e2be9dd82df4b11408a67c61bb4dc4f8e11b5b0fc888b38118b5/pydantic_core-2.33.2-cp312-cp312-win_arm64.whl", hash = "sha256:cca3868ddfaccfbc4bfb1d608e2ccaaebe0ae628e1416aeb9c4d88c001bb45ab", size = 1888894, upload-time = "2025-04-23T18:31:51.609Z" }, +] + +[[package]] +name = "pydeck" +version = "0.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "jinja2" }, + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/a1/ca/40e14e196864a0f61a92abb14d09b3d3da98f94ccb03b49cf51688140dab/pydeck-0.9.1.tar.gz", hash = "sha256:f74475ae637951d63f2ee58326757f8d4f9cd9f2a457cf42950715003e2cb605", size = 3832240, upload-time = "2024-05-10T15:36:21.153Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ab/4c/b888e6cf58bd9db9c93f40d1c6be8283ff49d88919231afe93a6bcf61626/pydeck-0.9.1-py2.py3-none-any.whl", hash = "sha256:b3f75ba0d273fc917094fa61224f3f6076ca8752b93d46faf3bcfd9f9d59b038", size = 6900403, upload-time = "2024-05-10T15:36:17.36Z" }, +] + +[[package]] +name = "pyerfa" +version = "2.0.1.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/71/39/63cc8291b0cf324ae710df41527faf7d331bce573899199d926b3e492260/pyerfa-2.0.1.5.tar.gz", hash = "sha256:17d6b24fe4846c65d5e7d8c362dcb08199dc63b30a236aedd73875cc83e1f6c0", size = 818430, upload-time = "2024-11-11T15:22:30.852Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7d/d9/3448a57cb5bd19950de6d6ab08bd8fbb3df60baa71726de91d73d76c481b/pyerfa-2.0.1.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b282d7c60c4c47cf629c484c17ac504fcb04abd7b3f4dfcf53ee042afc3a5944", size = 341818, upload-time = "2024-11-11T15:22:16.467Z" }, + { url = "https://files.pythonhosted.org/packages/11/4a/31a363370478b63c6289a34743f2ba2d3ae1bd8223e004d18ab28fb92385/pyerfa-2.0.1.5-cp39-abi3-macosx_11_0_arm64.whl", hash = "sha256:be1aeb70390dd03a34faf96749d5cabc58437410b4aab7213c512323932427df", size = 329370, upload-time = "2024-11-11T15:22:17.829Z" }, + { url = "https://files.pythonhosted.org/packages/cb/96/b6210fc624123c8ae13e1eecb68fb75e3f3adff216d95eee1c7b05843e3e/pyerfa-2.0.1.5-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0603e8e1b839327d586c8a627cdc634b795e18b007d84f0cda5500a0908254e", size = 692794, upload-time = "2024-11-11T15:22:19.429Z" }, + { url = "https://files.pythonhosted.org/packages/e5/e0/050018d855d26d3c0b4a7d1b2ed692be758ce276d8289e2a2b44ba1014a5/pyerfa-2.0.1.5-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e43c7194e3242083f2350b46c09fd4bf8ba1bcc0ebd1460b98fc47fe2389906", size = 738711, upload-time = "2024-11-11T15:22:20.661Z" }, + { url = "https://files.pythonhosted.org/packages/b9/f5/ff91ee77308793ae32fa1e1de95e9edd4551456dd888b4e87c5938657ca5/pyerfa-2.0.1.5-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:07b80cd70701f5d066b1ac8cce406682cfcd667a1186ec7d7ade597239a6021d", size = 722966, upload-time = 
"2024-11-11T15:22:21.905Z" }, + { url = "https://files.pythonhosted.org/packages/2c/56/b22b35c8551d2228ff8d445e63787112927ca13f6dc9e2c04f69d742c95b/pyerfa-2.0.1.5-cp39-abi3-win32.whl", hash = "sha256:d30b9b0df588ed5467e529d851ea324a67239096dd44703125072fd11b351ea2", size = 339955, upload-time = "2024-11-11T15:22:23.087Z" }, + { url = "https://files.pythonhosted.org/packages/b4/11/97233cf23ad5411ac6f13b1d6ee3888f90ace4f974d9bf9db887aa428912/pyerfa-2.0.1.5-cp39-abi3-win_amd64.whl", hash = "sha256:66292d437dcf75925b694977aa06eb697126e7b86553e620371ed3e48b5e0ad0", size = 349410, upload-time = "2024-11-11T15:22:24.817Z" }, +] + +[[package]] +name = "pyjwt" +version = "2.10.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = "2024-11-28T03:43:29.933Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, +] + +[[package]] +name = "pyparsing" +version = "3.2.5" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/a5/181488fc2b9d093e3972d2a472855aae8a03f000592dbfce716a512b3359/pyparsing-3.2.5.tar.gz", hash = "sha256:2df8d5b7b2802ef88e8d016a2eb9c7aeaa923529cd251ed0fe4608275d4105b6", size = 1099274, upload-time = "2025-09-21T04:11:06.277Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/10/5e/1aa9a93198c6b64513c9d7752de7422c06402de6600a8767da1524f9570b/pyparsing-3.2.5-py3-none-any.whl", hash = "sha256:e38a4f02064cf41fe6593d328d0512495ad1f3d8a91c4f73fc401b3079a59a5e", size = 113890, upload-time = "2025-09-21T04:11:04.117Z" }, +] + +[[package]] +name = "pyrefly" +version = "0.36.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/47/67/c5983b8cd002132b9d0e4ecbe096c04bbd86bd6898ee1f3eb63413f0b07e/pyrefly-0.36.0.tar.gz", hash = "sha256:f0fbadae9e6fadbf078eeafaa1c415ec1ede863a621132ecaad45ed316a944a8", size = 1670536, upload-time = "2025-10-06T17:50:28.856Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/09/a5ac35332359f1882e83062660db0361034352353a5aad49668148deec4c/pyrefly-0.36.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:614d6f6597024f2cbc2bd6c26b9123f6020bb22f0f905b9698f0786131e1afd4", size = 6803144, upload-time = "2025-10-06T17:50:13.38Z" }, + { url = "https://files.pythonhosted.org/packages/4c/ca/1cefd294d57977e1e2711da38dbd2f7636a454adaa8a21227db97f8dc83b/pyrefly-0.36.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:bdd5ce618ddea4d47982a2223b1f67196fa774cd2e223bfb85b202ad15144afb", size = 6350887, upload-time = "2025-10-06T17:50:15.75Z" }, + { url = "https://files.pythonhosted.org/packages/9e/85/2abcf08a8e663a231394b3ce9ee31257521dde0b53289bf3553ccf960320/pyrefly-0.36.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4cabdf10811c4af9d7960a7450a2d59c7e5d0ae1a4c19ffb1bbc7d81c00f4862", size = 6593770, upload-time = "2025-10-06T17:50:17.74Z" }, + { url = 
"https://files.pythonhosted.org/packages/6e/ca/1b2832a28a64e5145a070143dfaeaf4bb9083fe35b7bf179e0c84a8aa7e9/pyrefly-0.36.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:222d49512d6c832a2cf661847d8e709a9b00a570f68ece2173bd840128385296", size = 7424514, upload-time = "2025-10-06T17:50:19.744Z" }, + { url = "https://files.pythonhosted.org/packages/73/23/fbcf6e094df39902cbc3733e8edf8e3ac2bb4875b48050e2f514622af54b/pyrefly-0.36.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:937f79660786dc670bccba55279db347c1f3a51869af50e5602a84f8eeac4688", size = 7079712, upload-time = "2025-10-06T17:50:21.908Z" }, + { url = "https://files.pythonhosted.org/packages/e4/90/62c11f6324dd6d9990e05067758b6380cee7abf4589be8c51bc1786fa504/pyrefly-0.36.0-py3-none-win32.whl", hash = "sha256:f8683df8e3635de882163fdb7c74c643a5f2387a8f1c9f40ef36aba7722d697c", size = 6607047, upload-time = "2025-10-06T17:50:23.923Z" }, + { url = "https://files.pythonhosted.org/packages/d6/a3/44c7764dfab004fc4221ed326b9032ac446f26ce1231169d155007e02697/pyrefly-0.36.0-py3-none-win_amd64.whl", hash = "sha256:3327948305b37efcf7e30db74fd29c9b7e6e6d5d2e16effdb2508d611ff69fca", size = 7034208, upload-time = "2025-10-06T17:50:25.697Z" }, + { url = "https://files.pythonhosted.org/packages/e8/4b/e98f462612e3a335fb7cefb35d581d7fdd8ddce92f050a78b721b5855a19/pyrefly-0.36.0-py3-none-win_arm64.whl", hash = "sha256:e2fcb3a1f27418f71585e2300d8addc1d63aeb77f022cac1edae489f19da7a4b", size = 6626879, upload-time = "2025-10-06T17:50:27.312Z" }, +] + +[[package]] +name = "pytest" +version = "8.3.5" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "colorama", marker = "sys_platform == 'win32'" }, + { name = "iniconfig" }, + { name = "packaging" }, + { name = "pluggy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/ae/3c/c9d525a414d506893f0cd8a8d0de7706446213181570cdbd766691164e40/pytest-8.3.5.tar.gz", hash = "sha256:f4efe70cc14e511565ac476b57c279e12a855b11f48f212af1080ef2263d3845", size = 1450891, upload-time = "2025-03-02T12:54:54.503Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/30/3d/64ad57c803f1fa1e963a7946b6e0fea4a70df53c1a7fed304586539c2bac/pytest-8.3.5-py3-none-any.whl", hash = "sha256:c69214aa47deac29fad6c2a4f590b9c4a9fdb16a403176fe154b79c0b4d4d820", size = 343634, upload-time = "2025-03-02T12:54:52.069Z" }, +] + +[[package]] +name = "pytest-mock" +version = "3.15.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/68/14/eb014d26be205d38ad5ad20d9a80f7d201472e08167f0bb4361e251084a9/pytest_mock-3.15.1.tar.gz", hash = "sha256:1849a238f6f396da19762269de72cb1814ab44416fa73a8686deac10b0d87a0f", size = 34036, upload-time = "2025-09-16T16:37:27.081Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5a/cc/06253936f4a7fa2e0f48dfe6d851d9c56df896a9ab09ac019d70b760619c/pytest_mock-3.15.1-py3-none-any.whl", hash = "sha256:0a25e2eb88fe5168d535041d09a4529a188176ae608a6d249ee65abc0949630d", size = 10095, upload-time = "2025-09-16T16:37:25.734Z" }, +] + +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "six" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/66/c0/0c8b6ad9f17a802ee498c46e004a0eb49bc148f2fd230864601a86dcf6db/python-dateutil-2.9.0.post0.tar.gz", hash = 
"sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3", size = 342432, upload-time = "2024-03-01T18:36:20.211Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ec/57/56b9bcc3c9c6a792fcbaf139543cee77261f3651ca9da0c93f5c1221264b/python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = "sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427", size = 229892, upload-time = "2024-03-01T18:36:18.57Z" }, +] + +[[package]] +name = "pytz" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/f8/bf/abbd3cdfb8fbc7fb3d4d38d320f2441b1e7cbe29be4f23797b4a2b5d8aac/pytz-2025.2.tar.gz", hash = "sha256:360b9e3dbb49a209c21ad61809c7fb453643e048b38924c765813546746e81c3", size = 320884, upload-time = "2025-03-25T02:25:00.538Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/81/c4/34e93fe5f5429d7570ec1fa436f1986fb1f00c3e0f43a589fe2bbcd22c3f/pytz-2025.2-py2.py3-none-any.whl", hash = "sha256:5ddf76296dd8c44c26eb8f4b6f35488f3ccbf6fbbd7adee0b7262d43f0ec2f00", size = 509225, upload-time = "2025-03-25T02:24:58.468Z" }, +] + +[[package]] +name = "pywin32" +version = "311" +source = { registry = "https://pypi.org/simple" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e7/ab/01ea1943d4eba0f850c3c61e78e8dd59757ff815ff3ccd0a84de5f541f42/pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31", size = 8706543, upload-time = "2025-07-14T20:13:20.765Z" }, + { url = "https://files.pythonhosted.org/packages/d1/a8/a0e8d07d4d051ec7502cd58b291ec98dcc0c3fff027caad0470b72cfcc2f/pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067", size = 9495040, upload-time = "2025-07-14T20:13:22.543Z" }, + { url = "https://files.pythonhosted.org/packages/ba/3a/2ae996277b4b50f17d61f0603efd8253cb2d79cc7ae159468007b586396d/pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852", size = 8710102, upload-time = "2025-07-14T20:13:24.682Z" }, +] + +[[package]] +name = "pyyaml" +version = "6.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/54/ed/79a089b6be93607fa5cdaedf301d7dfb23af5f25c398d5ead2525b063e17/pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e", size = 130631, upload-time = "2024-08-06T20:33:50.674Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/86/0c/c581167fc46d6d6d7ddcfb8c843a4de25bdd27e4466938109ca68492292c/PyYAML-6.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:c70c95198c015b85feafc136515252a261a84561b7b1d51e3384e0655ddf25ab", size = 183873, upload-time = "2024-08-06T20:32:25.131Z" }, + { url = "https://files.pythonhosted.org/packages/a8/0c/38374f5bb272c051e2a69281d71cba6fdb983413e6758b84482905e29a5d/PyYAML-6.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:ce826d6ef20b1bc864f0a68340c8b3287705cae2f8b4b1d932177dcc76721725", size = 173302, upload-time = "2024-08-06T20:32:26.511Z" }, + { url = "https://files.pythonhosted.org/packages/c3/93/9916574aa8c00aa06bbac729972eb1071d002b8e158bd0e83a3b9a20a1f7/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f71ea527786de97d1a0cc0eacd1defc0985dcf6b3f17bb77dcfc8c34bec4dc5", size = 739154, upload-time = "2024-08-06T20:32:28.363Z" }, + { url = 
"https://files.pythonhosted.org/packages/95/0f/b8938f1cbd09739c6da569d172531567dbcc9789e0029aa070856f123984/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b22676e8097e9e22e36d6b7bda33190d0d400f345f23d4065d48f4ca7ae0425", size = 766223, upload-time = "2024-08-06T20:32:30.058Z" }, + { url = "https://files.pythonhosted.org/packages/b9/2b/614b4752f2e127db5cc206abc23a8c19678e92b23c3db30fc86ab731d3bd/PyYAML-6.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80bab7bfc629882493af4aa31a4cfa43a4c57c83813253626916b8c7ada83476", size = 767542, upload-time = "2024-08-06T20:32:31.881Z" }, + { url = "https://files.pythonhosted.org/packages/d4/00/dd137d5bcc7efea1836d6264f049359861cf548469d18da90cd8216cf05f/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:0833f8694549e586547b576dcfaba4a6b55b9e96098b36cdc7ebefe667dfed48", size = 731164, upload-time = "2024-08-06T20:32:37.083Z" }, + { url = "https://files.pythonhosted.org/packages/c9/1f/4f998c900485e5c0ef43838363ba4a9723ac0ad73a9dc42068b12aaba4e4/PyYAML-6.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8b9c7197f7cb2738065c481a0461e50ad02f18c78cd75775628afb4d7137fb3b", size = 756611, upload-time = "2024-08-06T20:32:38.898Z" }, + { url = "https://files.pythonhosted.org/packages/df/d1/f5a275fdb252768b7a11ec63585bc38d0e87c9e05668a139fea92b80634c/PyYAML-6.0.2-cp312-cp312-win32.whl", hash = "sha256:ef6107725bd54b262d6dedcc2af448a266975032bc85ef0172c5f059da6325b4", size = 140591, upload-time = "2024-08-06T20:32:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/0c/e8/4f648c598b17c3d06e8753d7d13d57542b30d56e6c2dedf9c331ae56312e/PyYAML-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:7e7401d0de89a9a855c839bc697c079a4af81cf878373abd7dc625847d25cbd8", size = 156338, upload-time = "2024-08-06T20:32:41.93Z" }, +] + +[[package]] +name = "referencing" +version = "0.36.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "attrs" }, + { name = "rpds-py" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/2f/db/98b5c277be99dd18bfd91dd04e1b759cad18d1a338188c936e92f921c7e2/referencing-0.36.2.tar.gz", hash = "sha256:df2e89862cd09deabbdba16944cc3f10feb6b3e6f18e902f7cc25609a34775aa", size = 74744, upload-time = "2025-01-25T08:48:16.138Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c1/b1/3baf80dc6d2b7bc27a95a67752d0208e410351e3feb4eb78de5f77454d8d/referencing-0.36.2-py3-none-any.whl", hash = "sha256:e8699adbbf8b5c7de96d8ffa0eb5c158b3beafce084968e2ea8bb08c6794dcd0", size = 26775, upload-time = "2025-01-25T08:48:14.241Z" }, +] + +[[package]] +name = "requests" +version = "2.32.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "certifi" }, + { name = "charset-normalizer" }, + { name = "idna" }, + { name = "urllib3" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/e1/0a/929373653770d8a0d7ea76c37de6e41f11eb07559b103b1c02cafb3f7cf8/requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422", size = 135258, upload-time = "2025-06-09T16:43:07.34Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7c/e4/56027c4a6b4ae70ca9de302488c5ca95ad4a39e190093d6c1a8ace08341b/requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c", size = 64847, upload-time = "2025-06-09T16:43:05.728Z" }, +] + +[[package]] +name = "rpds-py" 
+version = "0.25.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8c/a6/60184b7fc00dd3ca80ac635dd5b8577d444c57e8e8742cecabfacb829921/rpds_py-0.25.1.tar.gz", hash = "sha256:8960b6dac09b62dac26e75d7e2c4a22efb835d827a7278c34f72b2b84fa160e3", size = 27304, upload-time = "2025-05-21T12:46:12.502Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/7f/81/28ab0408391b1dc57393653b6a0cf2014cc282cc2909e4615e63e58262be/rpds_py-0.25.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:b5ffe453cde61f73fea9430223c81d29e2fbf412a6073951102146c84e19e34c", size = 364647, upload-time = "2025-05-21T12:43:28.559Z" }, + { url = "https://files.pythonhosted.org/packages/2c/9a/7797f04cad0d5e56310e1238434f71fc6939d0bc517192a18bb99a72a95f/rpds_py-0.25.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:115874ae5e2fdcfc16b2aedc95b5eef4aebe91b28e7e21951eda8a5dc0d3461b", size = 350454, upload-time = "2025-05-21T12:43:30.615Z" }, + { url = "https://files.pythonhosted.org/packages/69/3c/93d2ef941b04898011e5d6eaa56a1acf46a3b4c9f4b3ad1bbcbafa0bee1f/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a714bf6e5e81b0e570d01f56e0c89c6375101b8463999ead3a93a5d2a4af91fa", size = 389665, upload-time = "2025-05-21T12:43:32.629Z" }, + { url = "https://files.pythonhosted.org/packages/c1/57/ad0e31e928751dde8903a11102559628d24173428a0f85e25e187defb2c1/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:35634369325906bcd01577da4c19e3b9541a15e99f31e91a02d010816b49bfda", size = 403873, upload-time = "2025-05-21T12:43:34.576Z" }, + { url = "https://files.pythonhosted.org/packages/16/ad/c0c652fa9bba778b4f54980a02962748479dc09632e1fd34e5282cf2556c/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d4cb2b3ddc16710548801c6fcc0cfcdeeff9dafbc983f77265877793f2660309", size = 525866, upload-time = "2025-05-21T12:43:36.123Z" }, + { url = "https://files.pythonhosted.org/packages/2a/39/3e1839bc527e6fcf48d5fec4770070f872cdee6c6fbc9b259932f4e88a38/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9ceca1cf097ed77e1a51f1dbc8d174d10cb5931c188a4505ff9f3e119dfe519b", size = 416886, upload-time = "2025-05-21T12:43:38.034Z" }, + { url = "https://files.pythonhosted.org/packages/7a/95/dd6b91cd4560da41df9d7030a038298a67d24f8ca38e150562644c829c48/rpds_py-0.25.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c2cd1a4b0c2b8c5e31ffff50d09f39906fe351389ba143c195566056c13a7ea", size = 390666, upload-time = "2025-05-21T12:43:40.065Z" }, + { url = "https://files.pythonhosted.org/packages/64/48/1be88a820e7494ce0a15c2d390ccb7c52212370badabf128e6a7bb4cb802/rpds_py-0.25.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1de336a4b164c9188cb23f3703adb74a7623ab32d20090d0e9bf499a2203ad65", size = 425109, upload-time = "2025-05-21T12:43:42.263Z" }, + { url = "https://files.pythonhosted.org/packages/cf/07/3e2a17927ef6d7720b9949ec1b37d1e963b829ad0387f7af18d923d5cfa5/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9fca84a15333e925dd59ce01da0ffe2ffe0d6e5d29a9eeba2148916d1824948c", size = 567244, upload-time = "2025-05-21T12:43:43.846Z" }, + { url = "https://files.pythonhosted.org/packages/d2/e5/76cf010998deccc4f95305d827847e2eae9c568099c06b405cf96384762b/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_i686.whl", hash = 
"sha256:88ec04afe0c59fa64e2f6ea0dd9657e04fc83e38de90f6de201954b4d4eb59bd", size = 596023, upload-time = "2025-05-21T12:43:45.932Z" }, + { url = "https://files.pythonhosted.org/packages/52/9a/df55efd84403736ba37a5a6377b70aad0fd1cb469a9109ee8a1e21299a1c/rpds_py-0.25.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a8bd2f19e312ce3e1d2c635618e8a8d8132892bb746a7cf74780a489f0f6cdcb", size = 561634, upload-time = "2025-05-21T12:43:48.263Z" }, + { url = "https://files.pythonhosted.org/packages/ab/aa/dc3620dd8db84454aaf9374bd318f1aa02578bba5e567f5bf6b79492aca4/rpds_py-0.25.1-cp312-cp312-win32.whl", hash = "sha256:e5e2f7280d8d0d3ef06f3ec1b4fd598d386cc6f0721e54f09109a8132182fbfe", size = 222713, upload-time = "2025-05-21T12:43:49.897Z" }, + { url = "https://files.pythonhosted.org/packages/a3/7f/7cef485269a50ed5b4e9bae145f512d2a111ca638ae70cc101f661b4defd/rpds_py-0.25.1-cp312-cp312-win_amd64.whl", hash = "sha256:db58483f71c5db67d643857404da360dce3573031586034b7d59f245144cc192", size = 235280, upload-time = "2025-05-21T12:43:51.893Z" }, + { url = "https://files.pythonhosted.org/packages/99/f2/c2d64f6564f32af913bf5f3f7ae41c7c263c5ae4c4e8f1a17af8af66cd46/rpds_py-0.25.1-cp312-cp312-win_arm64.whl", hash = "sha256:6d50841c425d16faf3206ddbba44c21aa3310a0cebc3c1cdfc3e3f4f9f6f5728", size = 225399, upload-time = "2025-05-21T12:43:53.351Z" }, +] + +[[package]] +name = "rsa" +version = "4.9.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "pyasn1" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/da/8a/22b7beea3ee0d44b1916c0c1cb0ee3af23b700b6da9f04991899d0c555d4/rsa-4.9.1.tar.gz", hash = "sha256:e7bdbfdb5497da4c07dfd35530e1a902659db6ff241e39d9953cad06ebd0ae75", size = 29034, upload-time = "2025-04-16T09:51:18.218Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/64/8d/0133e4eb4beed9e425d9a98ed6e081a55d195481b7632472be1af08d2f6b/rsa-4.9.1-py3-none-any.whl", hash = "sha256:68635866661c6836b8d39430f97a996acbd61bfa49406748ea243539fe239762", size = 34696, upload-time = "2025-04-16T09:51:17.142Z" }, +] + +[[package]] +name = "ruff" +version = "0.9.7" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/39/8b/a86c300359861b186f18359adf4437ac8e4c52e42daa9eedc731ef9d5b53/ruff-0.9.7.tar.gz", hash = "sha256:643757633417907510157b206e490c3aa11cab0c087c912f60e07fbafa87a4c6", size = 3669813, upload-time = "2025-02-20T13:26:52.111Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b1/f3/3a1d22973291226df4b4e2ff70196b926b6f910c488479adb0eeb42a0d7f/ruff-0.9.7-py3-none-linux_armv6l.whl", hash = "sha256:99d50def47305fe6f233eb8dabfd60047578ca87c9dcb235c9723ab1175180f4", size = 11774588, upload-time = "2025-02-20T13:25:52.253Z" }, + { url = "https://files.pythonhosted.org/packages/8e/c9/b881f4157b9b884f2994fd08ee92ae3663fb24e34b0372ac3af999aa7fc6/ruff-0.9.7-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:d59105ae9c44152c3d40a9c40d6331a7acd1cdf5ef404fbe31178a77b174ea66", size = 11746848, upload-time = "2025-02-20T13:25:57.279Z" }, + { url = "https://files.pythonhosted.org/packages/14/89/2f546c133f73886ed50a3d449e6bf4af27d92d2f960a43a93d89353f0945/ruff-0.9.7-py3-none-macosx_11_0_arm64.whl", hash = "sha256:f313b5800483770bd540cddac7c90fc46f895f427b7820f18fe1822697f1fec9", size = 11177525, upload-time = "2025-02-20T13:26:00.007Z" }, + { url = 
"https://files.pythonhosted.org/packages/d7/93/6b98f2c12bf28ab9def59c50c9c49508519c5b5cfecca6de871cf01237f6/ruff-0.9.7-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:042ae32b41343888f59c0a4148f103208bf6b21c90118d51dc93a68366f4e903", size = 11996580, upload-time = "2025-02-20T13:26:03.274Z" }, + { url = "https://files.pythonhosted.org/packages/8e/3f/b3fcaf4f6d875e679ac2b71a72f6691a8128ea3cb7be07cbb249f477c061/ruff-0.9.7-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:87862589373b33cc484b10831004e5e5ec47dc10d2b41ba770e837d4f429d721", size = 11525674, upload-time = "2025-02-20T13:26:06.073Z" }, + { url = "https://files.pythonhosted.org/packages/f0/48/33fbf18defb74d624535d5d22adcb09a64c9bbabfa755bc666189a6b2210/ruff-0.9.7-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a17e1e01bee0926d351a1ee9bc15c445beae888f90069a6192a07a84af544b6b", size = 12739151, upload-time = "2025-02-20T13:26:08.964Z" }, + { url = "https://files.pythonhosted.org/packages/63/b5/7e161080c5e19fa69495cbab7c00975ef8a90f3679caa6164921d7f52f4a/ruff-0.9.7-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:7c1f880ac5b2cbebd58b8ebde57069a374865c73f3bf41f05fe7a179c1c8ef22", size = 13416128, upload-time = "2025-02-20T13:26:12.54Z" }, + { url = "https://files.pythonhosted.org/packages/4e/c8/b5e7d61fb1c1b26f271ac301ff6d9de5e4d9a9a63f67d732fa8f200f0c88/ruff-0.9.7-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e63fc20143c291cab2841dbb8260e96bafbe1ba13fd3d60d28be2c71e312da49", size = 12870858, upload-time = "2025-02-20T13:26:16.794Z" }, + { url = "https://files.pythonhosted.org/packages/da/cb/2a1a8e4e291a54d28259f8fc6a674cd5b8833e93852c7ef5de436d6ed729/ruff-0.9.7-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91ff963baed3e9a6a4eba2a02f4ca8eaa6eba1cc0521aec0987da8d62f53cbef", size = 14786046, upload-time = "2025-02-20T13:26:19.85Z" }, + { url = "https://files.pythonhosted.org/packages/ca/6c/c8f8a313be1943f333f376d79724260da5701426c0905762e3ddb389e3f4/ruff-0.9.7-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88362e3227c82f63eaebf0b2eff5b88990280fb1ecf7105523883ba8c3aaf6fb", size = 12550834, upload-time = "2025-02-20T13:26:23.082Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ad/f70cf5e8e7c52a25e166bdc84c082163c9c6f82a073f654c321b4dff9660/ruff-0.9.7-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:0372c5a90349f00212270421fe91874b866fd3626eb3b397ede06cd385f6f7e0", size = 11961307, upload-time = "2025-02-20T13:26:26.738Z" }, + { url = "https://files.pythonhosted.org/packages/52/d5/4f303ea94a5f4f454daf4d02671b1fbfe2a318b5fcd009f957466f936c50/ruff-0.9.7-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:d76b8ab60e99e6424cd9d3d923274a1324aefce04f8ea537136b8398bbae0a62", size = 11612039, upload-time = "2025-02-20T13:26:30.26Z" }, + { url = "https://files.pythonhosted.org/packages/eb/c8/bd12a23a75603c704ce86723be0648ba3d4ecc2af07eecd2e9fa112f7e19/ruff-0.9.7-py3-none-musllinux_1_2_i686.whl", hash = "sha256:0c439bdfc8983e1336577f00e09a4e7a78944fe01e4ea7fe616d00c3ec69a3d0", size = 12168177, upload-time = "2025-02-20T13:26:33.452Z" }, + { url = "https://files.pythonhosted.org/packages/cc/57/d648d4f73400fef047d62d464d1a14591f2e6b3d4a15e93e23a53c20705d/ruff-0.9.7-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:115d1f15e8fdd445a7b4dc9a30abae22de3f6bcabeb503964904471691ef7606", size = 12610122, upload-time = "2025-02-20T13:26:37.365Z" }, + { url = 
"https://files.pythonhosted.org/packages/49/79/acbc1edd03ac0e2a04ae2593555dbc9990b34090a9729a0c4c0cf20fb595/ruff-0.9.7-py3-none-win32.whl", hash = "sha256:e9ece95b7de5923cbf38893f066ed2872be2f2f477ba94f826c8defdd6ec6b7d", size = 9988751, upload-time = "2025-02-20T13:26:40.366Z" }, + { url = "https://files.pythonhosted.org/packages/6d/95/67153a838c6b6ba7a2401241fd8a00cd8c627a8e4a0491b8d853dedeffe0/ruff-0.9.7-py3-none-win_amd64.whl", hash = "sha256:3770fe52b9d691a15f0b87ada29c45324b2ace8f01200fb0c14845e499eb0c2c", size = 11002987, upload-time = "2025-02-20T13:26:43.762Z" }, + { url = "https://files.pythonhosted.org/packages/63/6a/aca01554949f3a401991dc32fe22837baeaccb8a0d868256cbb26a029778/ruff-0.9.7-py3-none-win_arm64.whl", hash = "sha256:b075a700b2533feb7a01130ff656a4ec0d5f340bb540ad98759b8401c32c2037", size = 10177763, upload-time = "2025-02-20T13:26:48.92Z" }, +] + +[[package]] +name = "s3transfer" +version = "0.10.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "botocore" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/c0/0a/1cdbabf9edd0ea7747efdf6c9ab4e7061b085aa7f9bfc36bb1601563b069/s3transfer-0.10.4.tar.gz", hash = "sha256:29edc09801743c21eb5ecbc617a152df41d3c287f67b615f73e5f750583666a7", size = 145287, upload-time = "2024-11-20T21:06:05.981Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/66/05/7957af15543b8c9799209506df4660cba7afc4cf94bfb60513827e96bed6/s3transfer-0.10.4-py3-none-any.whl", hash = "sha256:244a76a24355363a68164241438de1b72f8781664920260c48465896b712a41e", size = 83175, upload-time = "2024-11-20T21:06:03.961Z" }, +] + +[[package]] +name = "scikit-learn" +version = "1.7.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "joblib" }, + { name = "numpy" }, + { name = "scipy" }, + { name = "threadpoolctl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/98/c2/a7855e41c9d285dfe86dc50b250978105dce513d6e459ea66a6aeb0e1e0c/scikit_learn-1.7.2.tar.gz", hash = "sha256:20e9e49ecd130598f1ca38a1d85090e1a600147b9c02fa6f15d69cb53d968fda", size = 7193136, upload-time = "2025-09-09T08:21:29.075Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a7/aa/3996e2196075689afb9fce0410ebdb4a09099d7964d061d7213700204409/scikit_learn-1.7.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:8d91a97fa2b706943822398ab943cde71858a50245e31bc71dba62aab1d60a96", size = 9259818, upload-time = "2025-09-09T08:20:43.19Z" }, + { url = "https://files.pythonhosted.org/packages/43/5d/779320063e88af9c4a7c2cf463ff11c21ac9c8bd730c4a294b0000b666c9/scikit_learn-1.7.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:acbc0f5fd2edd3432a22c69bed78e837c70cf896cd7993d71d51ba6708507476", size = 8636997, upload-time = "2025-09-09T08:20:45.468Z" }, + { url = "https://files.pythonhosted.org/packages/5c/d0/0c577d9325b05594fdd33aa970bf53fb673f051a45496842caee13cfd7fe/scikit_learn-1.7.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e5bf3d930aee75a65478df91ac1225ff89cd28e9ac7bd1196853a9229b6adb0b", size = 9478381, upload-time = "2025-09-09T08:20:47.982Z" }, + { url = "https://files.pythonhosted.org/packages/82/70/8bf44b933837ba8494ca0fc9a9ab60f1c13b062ad0197f60a56e2fc4c43e/scikit_learn-1.7.2-cp312-cp312-manylinux_2_27_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4d6e9deed1a47aca9fe2f267ab8e8fe82ee20b4526b2c0cd9e135cea10feb44", size = 9300296, upload-time = "2025-09-09T08:20:50.366Z" }, + { url = 
"https://files.pythonhosted.org/packages/c6/99/ed35197a158f1fdc2fe7c3680e9c70d0128f662e1fee4ed495f4b5e13db0/scikit_learn-1.7.2-cp312-cp312-win_amd64.whl", hash = "sha256:6088aa475f0785e01bcf8529f55280a3d7d298679f50c0bb70a2364a82d0b290", size = 8731256, upload-time = "2025-09-09T08:20:52.627Z" }, +] + +[[package]] +name = "scipy" +version = "1.16.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/4c/3b/546a6f0bfe791bbb7f8d591613454d15097e53f906308ec6f7c1ce588e8e/scipy-1.16.2.tar.gz", hash = "sha256:af029b153d243a80afb6eabe40b0a07f8e35c9adc269c019f364ad747f826a6b", size = 30580599, upload-time = "2025-09-11T17:48:08.271Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/8d/6396e00db1282279a4ddd507c5f5e11f606812b608ee58517ce8abbf883f/scipy-1.16.2-cp312-cp312-macosx_10_14_x86_64.whl", hash = "sha256:89d6c100fa5c48472047632e06f0876b3c4931aac1f4291afc81a3644316bb0d", size = 36646259, upload-time = "2025-09-11T17:40:39.329Z" }, + { url = "https://files.pythonhosted.org/packages/3b/93/ea9edd7e193fceb8eef149804491890bde73fb169c896b61aa3e2d1e4e77/scipy-1.16.2-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:ca748936cd579d3f01928b30a17dc474550b01272d8046e3e1ee593f23620371", size = 28888976, upload-time = "2025-09-11T17:40:46.82Z" }, + { url = "https://files.pythonhosted.org/packages/91/4d/281fddc3d80fd738ba86fd3aed9202331180b01e2c78eaae0642f22f7e83/scipy-1.16.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:fac4f8ce2ddb40e2e3d0f7ec36d2a1e7f92559a2471e59aec37bd8d9de01fec0", size = 20879905, upload-time = "2025-09-11T17:40:52.545Z" }, + { url = "https://files.pythonhosted.org/packages/69/40/b33b74c84606fd301b2915f0062e45733c6ff5708d121dd0deaa8871e2d0/scipy-1.16.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:033570f1dcefd79547a88e18bccacff025c8c647a330381064f561d43b821232", size = 23553066, upload-time = "2025-09-11T17:40:59.014Z" }, + { url = "https://files.pythonhosted.org/packages/55/a7/22c739e2f21a42cc8f16bc76b47cff4ed54fbe0962832c589591c2abec34/scipy-1.16.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ea3421209bf00c8a5ef2227de496601087d8f638a2363ee09af059bd70976dc1", size = 33336407, upload-time = "2025-09-11T17:41:06.796Z" }, + { url = "https://files.pythonhosted.org/packages/53/11/a0160990b82999b45874dc60c0c183d3a3a969a563fffc476d5a9995c407/scipy-1.16.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:f66bd07ba6f84cd4a380b41d1bf3c59ea488b590a2ff96744845163309ee8e2f", size = 35673281, upload-time = "2025-09-11T17:41:15.055Z" }, + { url = "https://files.pythonhosted.org/packages/96/53/7ef48a4cfcf243c3d0f1643f5887c81f29fdf76911c4e49331828e19fc0a/scipy-1.16.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:5e9feab931bd2aea4a23388c962df6468af3d808ddf2d40f94a81c5dc38f32ef", size = 36004222, upload-time = "2025-09-11T17:41:23.868Z" }, + { url = "https://files.pythonhosted.org/packages/49/7f/71a69e0afd460049d41c65c630c919c537815277dfea214031005f474d78/scipy-1.16.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:03dfc75e52f72cf23ec2ced468645321407faad8f0fe7b1f5b49264adbc29cb1", size = 38664586, upload-time = "2025-09-11T17:41:31.021Z" }, + { url = "https://files.pythonhosted.org/packages/34/95/20e02ca66fb495a95fba0642fd48e0c390d0ece9b9b14c6e931a60a12dea/scipy-1.16.2-cp312-cp312-win_amd64.whl", hash = "sha256:0ce54e07bbb394b417457409a64fd015be623f36e330ac49306433ffe04bc97e", size = 38550641, 
upload-time = "2025-09-11T17:41:36.61Z" }, + { url = "https://files.pythonhosted.org/packages/92/ad/13646b9beb0a95528ca46d52b7babafbe115017814a611f2065ee4e61d20/scipy-1.16.2-cp312-cp312-win_arm64.whl", hash = "sha256:2a8ffaa4ac0df81a0b94577b18ee079f13fecdb924df3328fc44a7dc5ac46851", size = 25456070, upload-time = "2025-09-11T17:41:41.3Z" }, +] + +[[package]] +name = "six" +version = "1.17.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/94/e7/b2c673351809dca68a0e064b6af791aa332cf192da575fd474ed7d6f16a2/six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81", size = 34031, upload-time = "2024-12-04T17:35:28.174Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b7/ce/149a00dd41f10bc29e5921b496af8b574d8413afcd5e30dfa0ed46c2cc5e/six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274", size = 11050, upload-time = "2024-12-04T17:35:26.475Z" }, +] + +[[package]] +name = "smmap" +version = "5.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/44/cd/a040c4b3119bbe532e5b0732286f805445375489fceaec1f48306068ee3b/smmap-5.0.2.tar.gz", hash = "sha256:26ea65a03958fa0c8a1c7e8c7a58fdc77221b8910f6be2131affade476898ad5", size = 22329, upload-time = "2025-01-02T07:14:40.909Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/04/be/d09147ad1ec7934636ad912901c5fd7667e1c858e19d355237db0d0cd5e4/smmap-5.0.2-py3-none-any.whl", hash = "sha256:b30115f0def7d7531d22a0fb6502488d879e75b260a9db4d0819cfb25403af5e", size = 24303, upload-time = "2025-01-02T07:14:38.724Z" }, +] + +[[package]] +name = "sniffio" +version = "1.3.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" }, +] + +[[package]] +name = "sqlalchemy" +version = "2.0.43" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "greenlet", marker = "platform_machine == 'AMD64' or platform_machine == 'WIN32' or platform_machine == 'aarch64' or platform_machine == 'amd64' or platform_machine == 'ppc64le' or platform_machine == 'win32' or platform_machine == 'x86_64'" }, + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/d7/bc/d59b5d97d27229b0e009bd9098cd81af71c2fa5549c580a0a67b9bed0496/sqlalchemy-2.0.43.tar.gz", hash = "sha256:788bfcef6787a7764169cfe9859fe425bf44559619e1d9f56f5bddf2ebf6f417", size = 9762949, upload-time = "2025-08-11T14:24:58.438Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/61/db/20c78f1081446095450bdc6ee6cc10045fce67a8e003a5876b6eaafc5cc4/sqlalchemy-2.0.43-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:20d81fc2736509d7a2bd33292e489b056cbae543661bb7de7ce9f1c0cd6e7f24", size = 2134891, upload-time = "2025-08-11T15:51:13.019Z" }, + { url = 
"https://files.pythonhosted.org/packages/45/0a/3d89034ae62b200b4396f0f95319f7d86e9945ee64d2343dcad857150fa2/sqlalchemy-2.0.43-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:25b9fc27650ff5a2c9d490c13c14906b918b0de1f8fcbb4c992712d8caf40e83", size = 2123061, upload-time = "2025-08-11T15:51:14.319Z" }, + { url = "https://files.pythonhosted.org/packages/cb/10/2711f7ff1805919221ad5bee205971254845c069ee2e7036847103ca1e4c/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6772e3ca8a43a65a37c88e2f3e2adfd511b0b1da37ef11ed78dea16aeae85bd9", size = 3320384, upload-time = "2025-08-11T15:52:35.088Z" }, + { url = "https://files.pythonhosted.org/packages/6e/0e/3d155e264d2ed2778484006ef04647bc63f55b3e2d12e6a4f787747b5900/sqlalchemy-2.0.43-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a113da919c25f7f641ffbd07fbc9077abd4b3b75097c888ab818f962707eb48", size = 3329648, upload-time = "2025-08-11T15:56:34.153Z" }, + { url = "https://files.pythonhosted.org/packages/5b/81/635100fb19725c931622c673900da5efb1595c96ff5b441e07e3dd61f2be/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:4286a1139f14b7d70141c67a8ae1582fc2b69105f1b09d9573494eb4bb4b2687", size = 3258030, upload-time = "2025-08-11T15:52:36.933Z" }, + { url = "https://files.pythonhosted.org/packages/0c/ed/a99302716d62b4965fded12520c1cbb189f99b17a6d8cf77611d21442e47/sqlalchemy-2.0.43-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:529064085be2f4d8a6e5fab12d36ad44f1909a18848fcfbdb59cc6d4bbe48efe", size = 3294469, upload-time = "2025-08-11T15:56:35.553Z" }, + { url = "https://files.pythonhosted.org/packages/5d/a2/3a11b06715149bf3310b55a98b5c1e84a42cfb949a7b800bc75cb4e33abc/sqlalchemy-2.0.43-cp312-cp312-win32.whl", hash = "sha256:b535d35dea8bbb8195e7e2b40059e2253acb2b7579b73c1b432a35363694641d", size = 2098906, upload-time = "2025-08-11T15:55:00.645Z" }, + { url = "https://files.pythonhosted.org/packages/bc/09/405c915a974814b90aa591280623adc6ad6b322f61fd5cff80aeaef216c9/sqlalchemy-2.0.43-cp312-cp312-win_amd64.whl", hash = "sha256:1c6d85327ca688dbae7e2b06d7d84cfe4f3fffa5b5f9e21bb6ce9d0e1a0e0e0a", size = 2126260, upload-time = "2025-08-11T15:55:02.965Z" }, + { url = "https://files.pythonhosted.org/packages/b8/d9/13bdde6521f322861fab67473cec4b1cc8999f3871953531cf61945fad92/sqlalchemy-2.0.43-py3-none-any.whl", hash = "sha256:1681c21dd2ccee222c2fe0bef671d1aef7c504087c9c4e800371cfcc8ac966fc", size = 1924759, upload-time = "2025-08-11T15:39:53.024Z" }, +] + +[[package]] +name = "sqlparse" +version = "0.5.3" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e5/40/edede8dd6977b0d3da179a342c198ed100dd2aba4be081861ee5911e4da4/sqlparse-0.5.3.tar.gz", hash = "sha256:09f67787f56a0b16ecdbde1bfc7f5d9c3371ca683cfeaa8e6ff60b4807ec9272", size = 84999, upload-time = "2024-12-10T12:05:30.728Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a9/5c/bfd6bd0bf979426d405cc6e71eceb8701b148b16c21d2dc3c261efc61c7b/sqlparse-0.5.3-py3-none-any.whl", hash = "sha256:cf2196ed3418f3ba5de6af7e82c694a9fbdbfecccdfc72e281548517081f16ca", size = 44415, upload-time = "2024-12-10T12:05:27.824Z" }, +] + +[[package]] +name = "starlette" +version = "0.47.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "anyio" }, + { name = "typing-extensions" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/15/b9/cc3017f9a9c9b6e27c5106cc10cc7904653c3eec0729793aec10479dd669/starlette-0.47.3.tar.gz", hash = "sha256:6bc94f839cc176c4858894f1f8908f0ab79dfec1a6b8402f6da9be26ebea52e9", size = 2584144, upload-time = "2025-08-24T13:36:42.122Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/ce/fd/901cfa59aaa5b30a99e16876f11abe38b59a1a2c51ffb3d7142bb6089069/starlette-0.47.3-py3-none-any.whl", hash = "sha256:89c0778ca62a76b826101e7c709e70680a1699ca7da6b44d38eb0a7e61fe4b51", size = 72991, upload-time = "2025-08-24T13:36:40.887Z" }, +] + +[[package]] +name = "streamlit" +version = "1.46.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "altair" }, + { name = "blinker" }, + { name = "cachetools" }, + { name = "click" }, + { name = "gitpython" }, + { name = "numpy" }, + { name = "packaging" }, + { name = "pandas" }, + { name = "pillow" }, + { name = "protobuf" }, + { name = "pyarrow" }, + { name = "pydeck" }, + { name = "requests" }, + { name = "tenacity" }, + { name = "toml" }, + { name = "tornado" }, + { name = "typing-extensions" }, + { name = "watchdog", marker = "sys_platform != 'darwin'" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/72/83/f2aac961479594d1d7ee42cf485e3674992769d506732005cea91e11504a/streamlit-1.46.0.tar.gz", hash = "sha256:0b2734b48f11f1e5c8046011b6b1a2274982dc657eef2ade8db70f0e1dc53dda", size = 9651454, upload-time = "2025-06-18T09:12:01.221Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/be/26/79bbb77bec3d605f7de7a4b45c806b44d112e8c9bce77fb620e03d9f2b88/streamlit-1.46.0-py3-none-any.whl", hash = "sha256:f8624acabafcf18611a0fac2635cf181a7ba922b45bd131ae15fc8f80e1a5482", size = 10050930, upload-time = "2025-06-18T09:11:58.447Z" }, +] + +[[package]] +name = "streamlit-authenticator" +version = "0.4.2" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "bcrypt" }, + { name = "captcha" }, + { name = "cryptography" }, + { name = "extra-streamlit-components" }, + { name = "pyjwt" }, + { name = "pyyaml" }, + { name = "streamlit" }, +] +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/47/837b158e1a5b0d187d20c6be22c46d84d12a8d3e8d7113b67ebb33e221c9/streamlit_authenticator-0.4.2-py3-none-any.whl", hash = "sha256:442acccef6af65e2b0feb15d5e9f68707f204c1d31c60673690d87179c7ca5b2", size = 43197, upload-time = "2025-03-01T20:36:07.566Z" }, +] + +[[package]] +name = "tenacity" +version = "9.1.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/0a/d4/2b0cd0fe285e14b36db076e78c93766ff1d529d70408bd1d2a5a84f1d929/tenacity-9.1.2.tar.gz", hash = "sha256:1169d376c297e7de388d18b4481760d478b0e99a777cad3a9c86e556f4b697cb", size = 48036, upload-time = "2025-04-02T08:25:09.966Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e5/30/643397144bfbfec6f6ef821f36f33e57d35946c44a2352d3c9f0ae847619/tenacity-9.1.2-py3-none-any.whl", hash = "sha256:f77bf36710d8b73a50b2dd155c97b870017ad21afe6ab300326b0371b3b05138", size = 28248, upload-time = "2025-04-02T08:25:07.678Z" }, +] + +[[package]] +name = "threadpoolctl" +version = "3.6.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/b7/4d/08c89e34946fce2aec4fbb45c9016efd5f4d7f24af8e5d93296e935631d8/threadpoolctl-3.6.0.tar.gz", hash = "sha256:8ab8b4aa3491d812b623328249fab5302a68d2d71745c8a4c719a2fcaba9f44e", size = 21274, upload-time = "2025-03-13T13:49:23.031Z" } +wheels 
= [ + { url = "https://files.pythonhosted.org/packages/32/d5/f9a850d79b0851d1d4ef6456097579a9005b31fea68726a4ae5f2d82ddd9/threadpoolctl-3.6.0-py3-none-any.whl", hash = "sha256:43a0b8fd5a2928500110039e43a5eed8480b918967083ea48dc3ab9f13c4a7fb", size = 18638, upload-time = "2025-03-13T13:49:21.846Z" }, +] + +[[package]] +name = "toml" +version = "0.10.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/be/ba/1f744cdc819428fc6b5084ec34d9b30660f6f9daaf70eead706e3203ec3c/toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f", size = 22253, upload-time = "2020-11-01T01:40:22.204Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/44/6f/7120676b6d73228c96e17f1f794d8ab046fc910d781c8d151120c3f1569e/toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b", size = 16588, upload-time = "2020-11-01T01:40:20.672Z" }, +] + +[[package]] +name = "toolz" +version = "1.1.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/11/d6/114b492226588d6ff54579d95847662fc69196bdeec318eb45393b24c192/toolz-1.1.0.tar.gz", hash = "sha256:27a5c770d068c110d9ed9323f24f1543e83b2f300a687b7891c1a6d56b697b5b", size = 52613, upload-time = "2025-10-17T04:03:21.661Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/fb/12/5911ae3eeec47800503a238d971e51722ccea5feb8569b735184d5fcdbc0/toolz-1.1.0-py3-none-any.whl", hash = "sha256:15ccc861ac51c53696de0a5d6d4607f99c210739caf987b5d2054f3efed429d8", size = 58093, upload-time = "2025-10-17T04:03:20.435Z" }, +] + +[[package]] +name = "tornado" +version = "6.5.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/51/89/c72771c81d25d53fe33e3dca61c233b665b2780f21820ba6fd2c6793c12b/tornado-6.5.1.tar.gz", hash = "sha256:84ceece391e8eb9b2b95578db65e920d2a61070260594819589609ba9bc6308c", size = 509934, upload-time = "2025-05-22T18:15:38.788Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/77/89/f4532dee6843c9e0ebc4e28d4be04c67f54f60813e4bf73d595fe7567452/tornado-6.5.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:d50065ba7fd11d3bd41bcad0825227cc9a95154bad83239357094c36708001f7", size = 441948, upload-time = "2025-05-22T18:15:20.862Z" }, + { url = "https://files.pythonhosted.org/packages/15/9a/557406b62cffa395d18772e0cdcf03bed2fff03b374677348eef9f6a3792/tornado-6.5.1-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9e9ca370f717997cb85606d074b0e5b247282cf5e2e1611568b8821afe0342d6", size = 440112, upload-time = "2025-05-22T18:15:22.591Z" }, + { url = "https://files.pythonhosted.org/packages/55/82/7721b7319013a3cf881f4dffa4f60ceff07b31b394e459984e7a36dc99ec/tornado-6.5.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b77e9dfa7ed69754a54c89d82ef746398be82f749df69c4d3abe75c4d1ff4888", size = 443672, upload-time = "2025-05-22T18:15:24.027Z" }, + { url = "https://files.pythonhosted.org/packages/7d/42/d11c4376e7d101171b94e03cef0cbce43e823ed6567ceda571f54cf6e3ce/tornado-6.5.1-cp39-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:253b76040ee3bab8bcf7ba9feb136436a3787208717a1fb9f2c16b744fba7331", size = 443019, upload-time = "2025-05-22T18:15:25.735Z" }, + { url = 
"https://files.pythonhosted.org/packages/7d/f7/0c48ba992d875521ac761e6e04b0a1750f8150ae42ea26df1852d6a98942/tornado-6.5.1-cp39-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:308473f4cc5a76227157cdf904de33ac268af770b2c5f05ca6c1161d82fdd95e", size = 443252, upload-time = "2025-05-22T18:15:27.499Z" }, + { url = "https://files.pythonhosted.org/packages/89/46/d8d7413d11987e316df4ad42e16023cd62666a3c0dfa1518ffa30b8df06c/tornado-6.5.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:caec6314ce8a81cf69bd89909f4b633b9f523834dc1a352021775d45e51d9401", size = 443930, upload-time = "2025-05-22T18:15:29.299Z" }, + { url = "https://files.pythonhosted.org/packages/78/b2/f8049221c96a06df89bed68260e8ca94beca5ea532ffc63b1175ad31f9cc/tornado-6.5.1-cp39-abi3-musllinux_1_2_i686.whl", hash = "sha256:13ce6e3396c24e2808774741331638ee6c2f50b114b97a55c5b442df65fd9692", size = 443351, upload-time = "2025-05-22T18:15:31.038Z" }, + { url = "https://files.pythonhosted.org/packages/76/ff/6a0079e65b326cc222a54720a748e04a4db246870c4da54ece4577bfa702/tornado-6.5.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:5cae6145f4cdf5ab24744526cc0f55a17d76f02c98f4cff9daa08ae9a217448a", size = 443328, upload-time = "2025-05-22T18:15:32.426Z" }, + { url = "https://files.pythonhosted.org/packages/49/18/e3f902a1d21f14035b5bc6246a8c0f51e0eef562ace3a2cea403c1fb7021/tornado-6.5.1-cp39-abi3-win32.whl", hash = "sha256:e0a36e1bc684dca10b1aa75a31df8bdfed656831489bc1e6a6ebed05dc1ec365", size = 444396, upload-time = "2025-05-22T18:15:34.205Z" }, + { url = "https://files.pythonhosted.org/packages/7b/09/6526e32bf1049ee7de3bebba81572673b19a2a8541f795d887e92af1a8bc/tornado-6.5.1-cp39-abi3-win_amd64.whl", hash = "sha256:908e7d64567cecd4c2b458075589a775063453aeb1d2a1853eedb806922f568b", size = 444840, upload-time = "2025-05-22T18:15:36.1Z" }, + { url = "https://files.pythonhosted.org/packages/55/a7/535c44c7bea4578e48281d83c615219f3ab19e6abc67625ef637c73987be/tornado-6.5.1-cp39-abi3-win_arm64.whl", hash = "sha256:02420a0eb7bf617257b9935e2b754d1b63897525d8a289c9d65690d580b4dcf7", size = 443596, upload-time = "2025-05-22T18:15:37.433Z" }, +] + +[[package]] +name = "typing-extensions" +version = "4.14.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/d1/bc/51647cd02527e87d05cb083ccc402f93e441606ff1f01739a62c8ad09ba5/typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4", size = 107423, upload-time = "2025-06-02T14:52:11.399Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/69/e0/552843e0d356fbb5256d21449fa957fa4eff3bbc135a74a691ee70c7c5da/typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af", size = 43839, upload-time = "2025-06-02T14:52:10.026Z" }, +] + +[[package]] +name = "typing-inspection" +version = "0.4.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "typing-extensions" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/f8/b1/0c11f5058406b3af7609f121aaa6b609744687f1d158b3c3a5bf4cc94238/typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28", size = 75726, upload-time = "2025-05-21T18:55:23.885Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/17/69/cd203477f944c353c31bade965f880aa1061fd6bf05ded0726ca845b6ff7/typing_inspection-0.4.1-py3-none-any.whl", 
hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51", size = 14552, upload-time = "2025-05-21T18:55:22.152Z" }, +] + +[[package]] +name = "tzdata" +version = "2025.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/95/32/1a225d6164441be760d75c2c42e2780dc0873fe382da3e98a2e1e48361e5/tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9", size = 196380, upload-time = "2025-03-23T13:54:43.652Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" }, +] + +[[package]] +name = "urllib3" +version = "2.4.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/8a/78/16493d9c386d8e60e442a35feac5e00f0913c0f4b7c217c11e8ec2ff53e0/urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466", size = 390672, upload-time = "2025-04-10T15:23:39.232Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/6b/11/cc635220681e93a0183390e26485430ca2c7b5f9d33b15c74c2861cb8091/urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813", size = 128680, upload-time = "2025-04-10T15:23:37.377Z" }, +] + +[[package]] +name = "uvicorn" +version = "0.35.0" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "click" }, + { name = "h11" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/5e/42/e0e305207bb88c6b8d3061399c6a961ffe5fbb7e2aa63c9234df7259e9cd/uvicorn-0.35.0.tar.gz", hash = "sha256:bc662f087f7cf2ce11a1d7fd70b90c9f98ef2e2831556dd078d131b96cc94a01", size = 78473, upload-time = "2025-06-28T16:15:46.058Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/d2/e2/dc81b1bd1dcfe91735810265e9d26bc8ec5da45b4c0f6237e286819194c3/uvicorn-0.35.0-py3-none-any.whl", hash = "sha256:197535216b25ff9b785e29a0b79199f55222193d47f820816e7da751e9bc8d4a", size = 66406, upload-time = "2025-06-28T16:15:44.816Z" }, +] + +[[package]] +name = "waitress" +version = "3.0.2" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/bf/cb/04ddb054f45faa306a230769e868c28b8065ea196891f09004ebace5b184/waitress-3.0.2.tar.gz", hash = "sha256:682aaaf2af0c44ada4abfb70ded36393f0e307f4ab9456a215ce0020baefc31f", size = 179901, upload-time = "2024-11-16T20:02:35.195Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/8d/57/a27182528c90ef38d82b636a11f606b0cbb0e17588ed205435f8affe3368/waitress-3.0.2-py3-none-any.whl", hash = "sha256:c56d67fd6e87c2ee598b76abdd4e96cfad1f24cacdea5078d382b1f9d7b5ed2e", size = 56232, upload-time = "2024-11-16T20:02:33.858Z" }, +] + +[[package]] +name = "watchdog" +version = "6.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/db/7d/7f3d619e951c88ed75c6037b246ddcf2d322812ee8ea189be89511721d54/watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282", size = 131220, upload-time = "2024-11-01T14:07:13.037Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/39/ea/3930d07dafc9e286ed356a679aa02d777c06e9bfd1164fa7c19c288a5483/watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948", size = 96471, upload-time = "2024-11-01T14:06:37.745Z" }, + { url = "https://files.pythonhosted.org/packages/12/87/48361531f70b1f87928b045df868a9fd4e253d9ae087fa4cf3f7113be363/watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860", size = 88449, upload-time = "2024-11-01T14:06:39.748Z" }, + { url = "https://files.pythonhosted.org/packages/5b/7e/8f322f5e600812e6f9a31b75d242631068ca8f4ef0582dd3ae6e72daecc8/watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0", size = 89054, upload-time = "2024-11-01T14:06:41.009Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c7/ca4bf3e518cb57a686b2feb4f55a1892fd9a3dd13f470fca14e00f80ea36/watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13", size = 79079, upload-time = "2024-11-01T14:06:59.472Z" }, + { url = "https://files.pythonhosted.org/packages/5c/51/d46dc9332f9a647593c947b4b88e2381c8dfc0942d15b8edc0310fa4abb1/watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379", size = 79078, upload-time = "2024-11-01T14:07:01.431Z" }, + { url = "https://files.pythonhosted.org/packages/d4/57/04edbf5e169cd318d5f07b4766fee38e825d64b6913ca157ca32d1a42267/watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e", size = 79076, upload-time = "2024-11-01T14:07:02.568Z" }, + { url = "https://files.pythonhosted.org/packages/ab/cc/da8422b300e13cb187d2203f20b9253e91058aaf7db65b74142013478e66/watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f", size = 79077, upload-time = "2024-11-01T14:07:03.893Z" }, + { url = "https://files.pythonhosted.org/packages/2c/3b/b8964e04ae1a025c44ba8e4291f86e97fac443bca31de8bd98d3263d2fcf/watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26", size = 79078, upload-time = "2024-11-01T14:07:05.189Z" }, + { url = "https://files.pythonhosted.org/packages/62/ae/a696eb424bedff7407801c257d4b1afda455fe40821a2be430e173660e81/watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c", size = 79077, upload-time = "2024-11-01T14:07:06.376Z" }, + { url = "https://files.pythonhosted.org/packages/b5/e8/dbf020b4d98251a9860752a094d09a65e1b436ad181faf929983f697048f/watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2", size = 79078, upload-time = "2024-11-01T14:07:07.547Z" }, + { url = "https://files.pythonhosted.org/packages/07/f6/d0e5b343768e8bcb4cda79f0f2f55051bf26177ecd5651f84c07567461cf/watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a", size = 79065, upload-time = "2024-11-01T14:07:09.525Z" }, + { url = "https://files.pythonhosted.org/packages/db/d9/c495884c6e548fce18a8f40568ff120bc3a4b7b99813081c8ac0c936fa64/watchdog-6.0.0-py3-none-win_amd64.whl", hash = 
"sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680", size = 79070, upload-time = "2024-11-01T14:07:10.686Z" }, + { url = "https://files.pythonhosted.org/packages/33/e8/e40370e6d74ddba47f002a32919d91310d6074130fe4e17dabcafc15cbf1/watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f", size = 79067, upload-time = "2024-11-01T14:07:11.845Z" }, +] + +[[package]] +name = "weathergen-common" +version = "0.1.0" +source = { editable = "../common" } +dependencies = [ + { name = "astropy-healpix" }, + { name = "dask" }, + { name = "numcodecs" }, + { name = "omegaconf" }, + { name = "pyyaml" }, + { name = "xarray" }, + { name = "zarr" }, +] + +[package.metadata] +requires-dist = [ + { name = "astropy-healpix", specifier = "~=1.1.2" }, + { name = "dask", specifier = ">=2024.9.1" }, + { name = "numcodecs", specifier = "<0.16.0" }, + { name = "omegaconf", specifier = "~=2.3.0" }, + { name = "pyyaml" }, + { name = "xarray", specifier = ">=2025.6.1" }, + { name = "zarr", specifier = "==2.18.4,<3" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pyrefly", specifier = "==0.36.0" }, + { name = "pytest", specifier = "~=8.3.5" }, + { name = "pytest-mock", specifier = ">=3.14.1" }, + { name = "ruff", specifier = "==0.9.7" }, +] + +[[package]] +name = "weathergen-dashboard" +version = "0.1.0" +source = { virtual = "." } +dependencies = [ + { name = "boto3" }, + { name = "mlflow" }, + { name = "plotly" }, + { name = "polars" }, + { name = "requests" }, + { name = "streamlit" }, + { name = "streamlit-authenticator" }, + { name = "watchdog" }, + { name = "weathergen-common" }, + { name = "weathergen-metrics" }, +] + +[package.dev-dependencies] +dev = [ + { name = "pyrefly" }, + { name = "pytest" }, + { name = "pytest-mock" }, + { name = "ruff" }, +] + +[package.metadata] +requires-dist = [ + { name = "boto3", specifier = "<1.36" }, + { name = "mlflow", specifier = "~=3.3.2" }, + { name = "plotly", specifier = "~=6.1.2" }, + { name = "polars", specifier = "~=1.30.0" }, + { name = "requests", specifier = "~=2.32.4" }, + { name = "streamlit", specifier = "~=1.46.0" }, + { name = "streamlit-authenticator", specifier = ">=0.4.2" }, + { name = "watchdog" }, + { name = "weathergen-common", editable = "../common" }, + { name = "weathergen-metrics", editable = "../metrics" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pyrefly", specifier = "==0.36.0" }, + { name = "pytest", specifier = "~=8.3.5" }, + { name = "pytest-mock", specifier = ">=3.14.1" }, + { name = "ruff", specifier = "==0.9.7" }, +] + +[[package]] +name = "weathergen-metrics" +version = "0.1.0" +source = { editable = "../metrics" } +dependencies = [ + { name = "mlflow-skinny" }, + { name = "weathergen-common" }, +] + +[package.metadata] +requires-dist = [ + { name = "mlflow-skinny" }, + { name = "weathergen-common", editable = "../common" }, +] + +[package.metadata.requires-dev] +dev = [ + { name = "pyrefly", specifier = "==0.36.0" }, + { name = "pytest", specifier = "~=8.3.5" }, + { name = "pytest-mock", specifier = ">=3.14.1" }, + { name = "ruff", specifier = "==0.9.7" }, +] + +[[package]] +name = "werkzeug" +version = "3.1.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "markupsafe" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/9f/69/83029f1f6300c5fb2471d621ab06f6ec6b3324685a2ce0f9777fd4a8b71e/werkzeug-3.1.3.tar.gz", hash = 
"sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746", size = 806925, upload-time = "2024-11-08T15:52:18.093Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/52/24/ab44c871b0f07f491e5d2ad12c9bd7358e527510618cb1b803a88e986db1/werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e", size = 224498, upload-time = "2024-11-08T15:52:16.132Z" }, +] + +[[package]] +name = "wrapt" +version = "2.0.1" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/49/2a/6de8a50cb435b7f42c46126cf1a54b2aab81784e74c8595c8e025e8f36d3/wrapt-2.0.1.tar.gz", hash = "sha256:9c9c635e78497cacb81e84f8b11b23e0aacac7a136e73b8e5b2109a1d9fc468f", size = 82040, upload-time = "2025-11-07T00:45:33.312Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/cb/73/8cb252858dc8254baa0ce58ce382858e3a1cf616acebc497cb13374c95c6/wrapt-2.0.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1fdbb34da15450f2b1d735a0e969c24bdb8d8924892380126e2a293d9902078c", size = 78129, upload-time = "2025-11-07T00:43:48.852Z" }, + { url = "https://files.pythonhosted.org/packages/19/42/44a0db2108526ee6e17a5ab72478061158f34b08b793df251d9fbb9a7eb4/wrapt-2.0.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3d32794fe940b7000f0519904e247f902f0149edbe6316c710a8562fb6738841", size = 61205, upload-time = "2025-11-07T00:43:50.402Z" }, + { url = "https://files.pythonhosted.org/packages/4d/8a/5b4b1e44b791c22046e90d9b175f9a7581a8cc7a0debbb930f81e6ae8e25/wrapt-2.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:386fb54d9cd903ee0012c09291336469eb7b244f7183d40dc3e86a16a4bace62", size = 61692, upload-time = "2025-11-07T00:43:51.678Z" }, + { url = "https://files.pythonhosted.org/packages/11/53/3e794346c39f462bcf1f58ac0487ff9bdad02f9b6d5ee2dc84c72e0243b2/wrapt-2.0.1-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:7b219cb2182f230676308cdcacd428fa837987b89e4b7c5c9025088b8a6c9faf", size = 121492, upload-time = "2025-11-07T00:43:55.017Z" }, + { url = "https://files.pythonhosted.org/packages/c6/7e/10b7b0e8841e684c8ca76b462a9091c45d62e8f2de9c4b1390b690eadf16/wrapt-2.0.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:641e94e789b5f6b4822bb8d8ebbdfc10f4e4eae7756d648b717d980f657a9eb9", size = 123064, upload-time = "2025-11-07T00:43:56.323Z" }, + { url = "https://files.pythonhosted.org/packages/0e/d1/3c1e4321fc2f5ee7fd866b2d822aa89b84495f28676fd976c47327c5b6aa/wrapt-2.0.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fe21b118b9f58859b5ebaa4b130dee18669df4bd111daad082b7beb8799ad16b", size = 117403, upload-time = "2025-11-07T00:43:53.258Z" }, + { url = "https://files.pythonhosted.org/packages/a4/b0/d2f0a413cf201c8c2466de08414a15420a25aa83f53e647b7255cc2fab5d/wrapt-2.0.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:17fb85fa4abc26a5184d93b3efd2dcc14deb4b09edcdb3535a536ad34f0b4dba", size = 121500, upload-time = "2025-11-07T00:43:57.468Z" }, + { url = "https://files.pythonhosted.org/packages/bd/45/bddb11d28ca39970a41ed48a26d210505120f925918592283369219f83cc/wrapt-2.0.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:b89ef9223d665ab255ae42cc282d27d69704d94be0deffc8b9d919179a609684", size = 116299, upload-time = "2025-11-07T00:43:58.877Z" }, + { url = 
"https://files.pythonhosted.org/packages/81/af/34ba6dd570ef7a534e7eec0c25e2615c355602c52aba59413411c025a0cb/wrapt-2.0.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a453257f19c31b31ba593c30d997d6e5be39e3b5ad9148c2af5a7314061c63eb", size = 120622, upload-time = "2025-11-07T00:43:59.962Z" }, + { url = "https://files.pythonhosted.org/packages/e2/3e/693a13b4146646fb03254636f8bafd20c621955d27d65b15de07ab886187/wrapt-2.0.1-cp312-cp312-win32.whl", hash = "sha256:3e271346f01e9c8b1130a6a3b0e11908049fe5be2d365a5f402778049147e7e9", size = 58246, upload-time = "2025-11-07T00:44:03.169Z" }, + { url = "https://files.pythonhosted.org/packages/a7/36/715ec5076f925a6be95f37917b66ebbeaa1372d1862c2ccd7a751574b068/wrapt-2.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:2da620b31a90cdefa9cd0c2b661882329e2e19d1d7b9b920189956b76c564d75", size = 60492, upload-time = "2025-11-07T00:44:01.027Z" }, + { url = "https://files.pythonhosted.org/packages/ef/3e/62451cd7d80f65cc125f2b426b25fbb6c514bf6f7011a0c3904fc8c8df90/wrapt-2.0.1-cp312-cp312-win_arm64.whl", hash = "sha256:aea9c7224c302bc8bfc892b908537f56c430802560e827b75ecbde81b604598b", size = 58987, upload-time = "2025-11-07T00:44:02.095Z" }, + { url = "https://files.pythonhosted.org/packages/15/d1/b51471c11592ff9c012bd3e2f7334a6ff2f42a7aed2caffcf0bdddc9cb89/wrapt-2.0.1-py3-none-any.whl", hash = "sha256:4d2ce1bf1a48c5277d7969259232b57645aae5686dba1eaeade39442277afbca", size = 44046, upload-time = "2025-11-07T00:45:32.116Z" }, +] + +[[package]] +name = "xarray" +version = "2025.10.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "numpy" }, + { name = "packaging" }, + { name = "pandas" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/7b/ce/f5dd613ddd0b3f839c59e6c2fa20c62469bf671bf4c92a12b09dc0972326/xarray-2025.10.1.tar.gz", hash = "sha256:3c2b5ad7389825bd624ada5ff26b01ac54b1aae72e2fe0d724d81d40a2bf5785", size = 3058736, upload-time = "2025-10-07T20:25:56.708Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c3/78/4d6d68555a92cb97b4c192759c4ab585c5cb23490f64d4ddf12c66a3b051/xarray-2025.10.1-py3-none-any.whl", hash = "sha256:a4e699433b87a7fac340951bc36648645eeef72bdd915ff055ac2fd99865a73d", size = 1365202, upload-time = "2025-10-07T20:25:54.964Z" }, +] + +[[package]] +name = "zarr" +version = "2.18.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "asciitree" }, + { name = "fasteners", marker = "sys_platform != 'emscripten'" }, + { name = "numcodecs" }, + { name = "numpy" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/21/d1/764ca5b66d91b20dede66aedc6eb9ede3adbe5c61779e7378a7ecb010e87/zarr-2.18.4.tar.gz", hash = "sha256:37790ededd0683ae1abe6ff90aa16c22543b3436810060f53d72c15e910c24bb", size = 3603684, upload-time = "2024-12-12T16:04:10.52Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/b4/d1/c84022a44afc7b7ccc442fba3daee56bdd03593d91ee4bc245a08e4fcc55/zarr-2.18.4-py3-none-any.whl", hash = "sha256:2795e20aff91093ce7e4da36ab1a138aededbd8ab66bf01fd01512e61d31e5d1", size = 210600, upload-time = "2024-12-12T16:04:06.642Z" }, +] + +[[package]] +name = "zipp" +version = "3.23.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/e3/02/0f2892c661036d50ede074e376733dca2ae7c6eb617489437771209d4180/zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166", size = 25547, upload-time = "2025-06-08T17:06:39.4Z" } +wheels = [ + { url = 
"https://files.pythonhosted.org/packages/2e/54/647ade08bf0db230bfea292f893923872fd20be6ac6f53b2b936ba839d75/zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e", size = 10276, upload-time = "2025-06-08T17:06:38.034Z" }, +] diff --git a/packages/dashboard/weathergen/dashboard/__init__.py b/packages/dashboard/weathergen/dashboard/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/packages/dashboard/weathergen/dashboard/metrics.py b/packages/dashboard/weathergen/dashboard/metrics.py new file mode 100644 index 000000000..50fd1ed41 --- /dev/null +++ b/packages/dashboard/weathergen/dashboard/metrics.py @@ -0,0 +1,77 @@ +""" +Downloads metrics from MLFlow. +""" + +import logging + +import mlflow +import mlflow.client +import polars as pl +import streamlit as st +from mlflow.client import MlflowClient + +from weathergen.metrics.mlflow_utils import setup_mlflow as setup_mlflow_utils + +_logger = logging.getLogger(__name__) + +phase = "train" +exp_lifecycle = "test" +project = "WeatherGenerator" +experiment_id = "384213844828345" +all_stages = ["train", "val", "eval"] + +# Polars utilities +stage_is_eval = pl.col("tags.stage") == "eval" +stage_is_train = pl.col("tags.stage") == "train" +stage_is_val = pl.col("tags.stage") == "val" + + +# Cache TTL in seconds +ST_TTL_SEC = 3600 + + +class MlFlowUpload: + tracking_uri = "databricks" + registry_uri = "databricks-uc" + experiment_name = "/Shared/weathergen-dev/core-model/defaultExperiment" + + +@st.cache_resource(ttl=ST_TTL_SEC) +def setup_mflow() -> MlflowClient: + return setup_mlflow_utils(private_config=None) + + +@st.cache_data(ttl=ST_TTL_SEC, max_entries=2) +def latest_runs(): + """ + Get the latest runs for each WG run_id and stage. + """ + _logger.info("Downloading latest runs from MLFlow") + runs_pdf = pl.DataFrame( + mlflow.search_runs( + experiment_ids=[experiment_id], + # filter_string="status='FINISHED' AND tags.completion_status = 'success'", + ) + ) + runs_pdf = runs_pdf.filter(pl.col("tags.stage").is_in(all_stages)) + latest_run_by_exp = ( + runs_pdf.sort(by="end_time", descending=True) + .group_by(["tags.run_id", "tags.stage"]) + .agg(pl.col("*").last()) + .sort(by="tags.run_id") + ) + _logger.info("Number of latest runs: %d", len(runs_pdf)) + return latest_run_by_exp + + +@st.cache_data(ttl=ST_TTL_SEC, max_entries=2) +def all_runs(): + _logger.info("Downloading all runs from MLFlow") + runs_pdf = pl.DataFrame( + mlflow.search_runs( + experiment_ids=[experiment_id], + # filter_string="status='FINISHED' AND tags.completion_status = 'success'", + ) + ) + _logger.info("Number of all runs: %d", len(runs_pdf)) + return runs_pdf diff --git a/packages/metrics/src/weathergen/metrics/mlflow_utils.py b/packages/metrics/src/weathergen/metrics/mlflow_utils.py index 27a8bec8e..636a39a64 100644 --- a/packages/metrics/src/weathergen/metrics/mlflow_utils.py +++ b/packages/metrics/src/weathergen/metrics/mlflow_utils.py @@ -17,8 +17,6 @@ project_name = "WeatherGenerator" project_lifecycle = "dev" -_platform_env = get_platform_env() - class MlFlowUpload: tracking_uri = "databricks" @@ -35,13 +33,14 @@ def run_tags(cls, run_id: str, phase: str, from_run_id: str | None) -> dict[str, """ Returns the tags to be set for a run. """ + # Directly calling get_platform_env() here because it may not be available at import time. 
dct = { "lifecycle": project_lifecycle, - "hpc": _platform_env.get_hpc() or "unknown", + "hpc": get_platform_env().get_hpc() or "unknown", "run_id": run_id, "stage": phase, "project": project_name, - "uploader": _platform_env.get_hpc_user() or "unknown", + "uploader": get_platform_env().get_hpc_user() or "unknown", "completion_status": "success", } if from_run_id: @@ -139,9 +138,13 @@ def log_scores( ) -def setup_mlflow(private_config: Config) -> MlflowClient: - os.environ["DATABRICKS_HOST"] = private_config["mlflow"]["tracking_uri"] - os.environ["DATABRICKS_TOKEN"] = private_config["secrets"]["mlflow_token"] +def setup_mlflow(private_config: Config | None) -> MlflowClient: + if private_config is None: + assert os.environ.get("DATABRICKS_HOST") is not None, "DATABRICKS_HOST not set" + assert os.environ.get("DATABRICKS_TOKEN") is not None, "DATABRICKS_TOKEN not set" + else: + os.environ["DATABRICKS_HOST"] = private_config["mlflow"]["tracking_uri"] + os.environ["DATABRICKS_TOKEN"] = private_config["secrets"]["mlflow_token"] mlflow.set_tracking_uri(MlFlowUpload.tracking_uri) mlflow.set_registry_uri(MlFlowUpload.registry_uri) mlflow_client = mlflow.client.MlflowClient( diff --git a/pyproject.toml b/pyproject.toml index 0f0f7a296..fb2b7c409 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -252,5 +252,7 @@ members = [ "packages/evaluate", "packages/metrics", "packages/readers_extra", +# Explicitly not depending on 'packages/dashboard' : this causes issues when deploying +# the streamlit dashboard. ] From fd10f031f4674c5aff362bee72eb075595481500 Mon Sep 17 00:00:00 2001 From: Timothy Hunter Date: Wed, 19 Nov 2025 15:00:38 +0100 Subject: [PATCH 04/24] [1291] Remove epoch reference from WeatherGenReader (#1292) (#1299) * [1291] remove epoch ref * Adding backward compatibility to epoch and adapting score .json naming --------- Co-authored-by: Simone Norberti <63310821+simone99n@users.noreply.github.com> Co-authored-by: Matthias --- packages/evaluate/src/weathergen/evaluate/io_reader.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/packages/evaluate/src/weathergen/evaluate/io_reader.py b/packages/evaluate/src/weathergen/evaluate/io_reader.py index 66fb2602d..fe12c58e5 100644 --- a/packages/evaluate/src/weathergen/evaluate/io_reader.py +++ b/packages/evaluate/src/weathergen/evaluate/io_reader.py @@ -469,7 +469,8 @@ def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict | None = Non super().__init__(eval_cfg, run_id, private_paths) - self.mini_epoch = eval_cfg.mini_epoch + # TODO: remove backwards compatibility to "epoch" in Feb. 
2026
+        self.mini_epoch = getattr(eval_cfg, "mini_epoch", eval_cfg["epoch"])
         self.rank = eval_cfg.rank
 
         # Load model configuration and set (run-id specific) directories
@@ -889,7 +890,7 @@ def load_scores(self, stream: str, region: str, metric: str) -> xr.DataArray | N
         """
         score_path = (
             Path(self.metrics_dir)
-            / f"{self.run_id}_{stream}_{region}_{metric}_epoch{self.epoch:05d}.json"
+            / f"{self.run_id}_{stream}_{region}_{metric}_chkpt{self.mini_epoch:05d}.json"
         )
 
         _logger.debug(f"Looking for: {score_path}")

From 7b9d3495281961bd9a9cb22e5ad35e43c8c66020 Mon Sep 17 00:00:00 2001
From: Seb Hickman <56727418+shmh40@users.noreply.github.com>
Date: Wed, 19 Nov 2025 18:34:39 +0000
Subject: [PATCH 05/24] [1291] epoch backward compatibility (#1302) (#1303)

Co-authored-by: Simone Norberti <63310821+simone99n@users.noreply.github.com>
---
 packages/evaluate/src/weathergen/evaluate/io_reader.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/packages/evaluate/src/weathergen/evaluate/io_reader.py b/packages/evaluate/src/weathergen/evaluate/io_reader.py
index fe12c58e5..e6f409da6 100644
--- a/packages/evaluate/src/weathergen/evaluate/io_reader.py
+++ b/packages/evaluate/src/weathergen/evaluate/io_reader.py
@@ -470,7 +470,7 @@ def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict | None = Non
         super().__init__(eval_cfg, run_id, private_paths)
 
         # TODO: remove backwards compatibility to "epoch" in Feb. 2026
-        self.mini_epoch = getattr(eval_cfg, "mini_epoch", eval_cfg["epoch"])
+        self.mini_epoch = getattr(eval_cfg, "mini_epoch", getattr(eval_cfg, "epoch", -1))
         self.rank = eval_cfg.rank
 
         # Load model configuration and set (run-id specific) directories

From ae298786093daf7ac6a501e517eb78799cfffe11 Mon Sep 17 00:00:00 2001
From: Timothy Hunter
Date: Tue, 25 Nov 2025 14:14:48 +0100
Subject: [PATCH 06/24] [1137] Example of tag descriptions (#1212) (#1351)

* examples

* more explanations
---
 config/default_config.yml | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/config/default_config.yml b/config/default_config.yml
index d7d66660f..695b2e71e 100644
--- a/config/default_config.yml
+++ b/config/default_config.yml
@@ -161,3 +161,25 @@ train_log_freq:
   terminal: 10
   metrics: 20
   checkpoint: 250
+
+
+# Tags for experiment tracking
+# These tags will be logged in MLFlow along with completed runs for train, eval, val
+# The tags are free-form, with the following rules:
+# - tags should be primitive types (strings, numbers, booleans). NO lists or dictionaries
+# - tags should not duplicate existing config entries.
+# - try to reuse existing tags where possible. MLFlow does not like having too many unique tags
+# - do not use long strings in values (less than 20 characters is a good rule of thumb, we may enforce this in the future)
+wgtags:
+  # The name of the organization of the person running the experiment.
+  # This may be autofilled in the future. Expected values are lowercase strings of
+  # the organizations codenames in https://confluence.ecmwf.int/display/MAEL/Staff+Contact+List
+  # e.g. "ecmwf", "cmcc", "metnor", "jsc", "escience"
+  org: None
+  # The name of the experiment. This is a distinctive codename for the experiment campaign being run.
+  # This is expected to be the primary tag for comparing experiments in MLFlow.
+ # Expected values are lowercase strings with no spaces, just underscores: + # Examples: "rollout_ablation_grid" + exp: None + # *** Experiment-specific tags *** + grid: None From bdcfa0728fc9a4748911476939af2792e3caa085 Mon Sep 17 00:00:00 2001 From: Simon Grasse <161459968+grassesi@users.noreply.github.com> Date: Thu, 27 Nov 2025 14:12:27 +0100 Subject: [PATCH 07/24] remove softlink dashboard -> packages/dashboard (#1298) (#1371) --- dashboard | 1 - 1 file changed, 1 deletion(-) delete mode 120000 dashboard diff --git a/dashboard b/dashboard deleted file mode 120000 index 463de28d3..000000000 --- a/dashboard +++ /dev/null @@ -1 +0,0 @@ -packages/dashboard \ No newline at end of file From f4f84e805f58ee6c1a4751c43ec48439f33e01c6 Mon Sep 17 00:00:00 2001 From: wesselkamp1 Date: Mon, 1 Dec 2025 15:31:10 +0100 Subject: [PATCH 08/24] initialise model config --- config/lst_config.yml | 194 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 194 insertions(+) create mode 100644 config/lst_config.yml diff --git a/config/lst_config.yml b/config/lst_config.yml new file mode 100644 index 000000000..21d64301e --- /dev/null +++ b/config/lst_config.yml @@ -0,0 +1,194 @@ +streams_directory: "./config/streams/seviri_lst/" + +embed_orientation: "channels" +embed_local_coords: True +embed_centroids_local_coords: False +embed_size_centroids: 0 +embed_unembed_mode: "block" +embed_dropout_rate: 0.1 + +target_cell_local_prediction: True + +ae_local_dim_embed: 1024 +ae_local_num_blocks: 2 +ae_local_num_heads: 16 +ae_local_dropout_rate: 0.1 +ae_local_with_qk_lnorm: True + +ae_local_num_queries: 1 +ae_local_queries_per_cell: False +ae_adapter_num_heads: 16 +ae_adapter_embed: 128 +ae_adapter_with_qk_lnorm: True +ae_adapter_with_residual: True +ae_adapter_dropout_rate: 0.1 + +ae_global_dim_embed: 2048 +ae_global_num_blocks: 8 +ae_global_num_heads: 32 +ae_global_dropout_rate: 0.1 +ae_global_with_qk_lnorm: True +# TODO: switching to < 1 triggers triton-related issues. +# See https://github.com/ecmwf/WeatherGenerator/issues/1050 +ae_global_att_dense_rate: 1.0 +ae_global_block_factor: 64 +ae_global_mlp_hidden_factor: 2 + +ae_aggregation_num_blocks: 2 +ae_aggregation_num_heads: 32 +ae_aggregation_dropout_rate: 0.1 +ae_aggregation_with_qk_lnorm: True +ae_aggregation_att_dense_rate: 1.0 +ae_aggregation_block_factor: 64 +ae_aggregation_mlp_hidden_factor: 2 + +decoder_type: PerceiverIOCoordConditioning # CrossAttentionAdaNormConditioning +pred_adapter_kv: False +pred_self_attention: True +pred_dyadic_dims: False +pred_mlp_adaln: True + +# number of steps offset applied to first target window; if set to zero and forecast_steps=0 then +# one is training an auto-encoder +forecast_offset : 0 +forecast_delta_hrs: 0 +forecast_steps: 0 +forecast_policy: null +forecast_att_dense_rate: 1.0 +fe_num_blocks: 0 +fe_num_heads: 16 +fe_dropout_rate: 0.1 +fe_with_qk_lnorm: True +impute_latent_noise_std: 0.0 # 1e-4 + +healpix_level: 5 + +with_mixed_precision: True +with_flash_attention: True +compile_model: False +with_fsdp: True +attention_dtype: bf16 +mixed_precision_dtype: bf16 +mlp_norm_eps: 1e-5 +norm_eps: 1e-4 + +latent_noise_kl_weight: 0.0 # 1e-5 +latent_noise_gamma: 2.0 +latent_noise_saturate_encodings: 5 +latent_noise_use_additive_noise: False +latent_noise_deterministic_latents: True + +batch_size_per_gpu: 1 +batch_size_validation_per_gpu: 1 + +# a regex that needs to fully match the name of the modules you want to freeze +# e.g. 
".*ERA5" will match any module whose name ends in ERA5\ +# encoders and decoders that exist per stream have the stream name attached at the end +freeze_modules: "" + +# whether to track the exponential moving average of weights for validation +validate_with_ema: True +ema_ramp_up_ratio: 0.09 +ema_halflife_in_thousands: 1e-3 + +# training mode: "forecast" or "masking" (masked token modeling) +# for "masking" to train with auto-encoder mode, forecast_offset should be 0 +training_mode: "masking" +training_mode_config: {"losses": {LossPhysical: {weight: 0.7, loss_fcts: [['mse', 0.8], ['mae', 0.2]]},} + } +# training_mode_config: {"loss": {LossPhysical: [['mse', 0.7]], +# LossLatent: [['mse', 0.3]], +# LossStudentTeacher: [{'iBOT': {}, 'JEPA': {options}}],} +# } +validation_mode_config: {"losses": {LossPhysical: {weight: 1.0, loss_fcts: [['mse', 1.0]]},} + } +# masking rate when training mode is "masking"; ignored in foreacast mode +masking_rate: 0.6 +# sample the masking rate (with normal distribution centered at masking_rate) +# note that a sampled masking rate leads to varying requirements +masking_rate_sampling: True +# sample a subset of all target points, useful e.g. to reduce memory requirements (also can specify per-stream) +sampling_rate_target: 1.0 +# include a masking strategy here, currently only supporting "random", "block", "healpix", "channel", "causal" and "combination" +masking_strategy: "random" +# masking_strategy_config is a dictionary of additional parameters for the masking strategy +# required for "healpix" and "channel" masking strategies +# "healpix": requires healpix mask level to be specified with `hl_mask` +# "channel": requires "mode" to be specified, "per_cell" or "global", +masking_strategy_config: {"strategies": ["random", "healpix", "channel"], + "probabilities": [0.34, 0.33, 0.33], + "hl_mask": 3, "mode": "per_cell", + "same_strategy_per_batch": false + } + +num_mini_epochs: 32 +samples_per_mini_epoch: 4096 +samples_per_validation: 512 + +shuffle: True + +lr_scaling_policy: "sqrt" +lr_start: 1e-6 +lr_max: 5e-5 +lr_final_decay: 1e-6 +lr_final: 0.0 +lr_steps_warmup: 512 +lr_steps_cooldown: 512 +lr_policy_warmup: "cosine" +lr_policy_decay: "constant" +lr_policy_cooldown: "linear" + +grad_clip: 1.0 +weight_decay: 0.1 +norm_type: "LayerNorm" +nn_module: "te" +log_grad_norms: False + +start_date: 197901010000 +end_date: 202012310000 +start_date_val: 202101010000 +end_date_val: 202201010000 +len_hrs: 6 +step_hrs: 6 +input_window_steps: 1 + +val_initial: False + +loader_num_workers: 8 +log_validation: 0 +streams_output: ["ERA5"] + +istep: 0 +run_history: [] + +desc: "" +data_loader_rng_seed: ??? +run_id: ??? + +# The period to log in the training loop (in number of batch steps) +train_log_freq: + terminal: 10 + metrics: 20 + checkpoint: 250 + + +# Tags for experiment tracking +# These tags will be logged in MLFlow along with completed runs for train, eval, val +# The tags are free-form, with the following rules: +# - tags should be primitive types (strings, numbers, booleans). NO lists or dictionaries +# - tags should not duplicate existing config entries. +# - try to reuse existing tags where possible. MLFlow does not like having too many unique tags +# - do not use long strings in values (less than 20 characters is a good rule of thumb, we may enforce this in the future) +wgtags: + # The name of the organization of the person running the experiment. + # This may be autofilled in the future. 
Expected values are lowercase strings of + # the organizations codenames in https://confluence.ecmwf.int/display/MAEL/Staff+Contact+List + # e.g. "ecmwf", "cmcc", "metnor", "jsc", "escience" + org: mpg + # The name of the experiment. This is a distinctive codename for the experiment campaign being run. + # This is expected to be the primary tag for comparing experiments in MLFlow. + # Expected values are lowercase strings with no spaces, just underscores: + # Examples: "rollout_ablation_grid" + exp: lst_finetune + # *** Experiment-specific tags *** + grid: v0 \ No newline at end of file From 5a88b1d60281df8ecda455e7ede63677e278ceec Mon Sep 17 00:00:00 2001 From: wesselkamp1 Date: Mon, 1 Dec 2025 15:31:37 +0100 Subject: [PATCH 09/24] initialise stream config --- config/streams/seviri_lst/seviri_lst.yml | 34 ++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 config/streams/seviri_lst/seviri_lst.yml diff --git a/config/streams/seviri_lst/seviri_lst.yml b/config/streams/seviri_lst/seviri_lst.yml new file mode 100644 index 000000000..9a185fa22 --- /dev/null +++ b/config/streams/seviri_lst/seviri_lst.yml @@ -0,0 +1,34 @@ +SEVIRI_LST : + type : msg_lst + filenames : ['mpg_seviri_2017_lst_v0'] + data_start_time : "2017-01-01 00:00" + data_end_time : "2017-12-31 00:00" + source_exclude: [] + target_exclude: ["LANDCOV", "quality_flag", "DEM", "FVC", "LW_MASK"] + target: ["LST"] + source: [] + geoinfos: ["DEM", "LANDCOV"] + metadata: "/p/scratch/weatherai/shared/weather_generator_data/mpg_seviri_2017_lst_v0/metadata" + experiment: "johannesburg" + loss_weight : 1.0 + masking_rate : 0.6 + masking_rate_none : 0.05 + token_size : 64 + tokenize_spacetime : True + max_num_targets: -1 + embed : + net : transformer + num_tokens : 1 + num_heads : 2 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' + num_layers : 2 + num_heads : 8 + pred_head : + ens_size : 1 + num_layers : 1 \ No newline at end of file From 8addc96c85c422513ade691d50b6c30e33d71e45 Mon Sep 17 00:00:00 2001 From: wesselkamp1 Date: Mon, 1 Dec 2025 15:32:46 +0100 Subject: [PATCH 10/24] initial datareader plus adjustment in multistream sampler --- src/weathergen/datasets/data_reader_seviri.py | 244 ++++++++++++++++++ .../datasets/multi_stream_data_sampler.py | 4 + 2 files changed, 248 insertions(+) create mode 100644 src/weathergen/datasets/data_reader_seviri.py diff --git a/src/weathergen/datasets/data_reader_seviri.py b/src/weathergen/datasets/data_reader_seviri.py new file mode 100644 index 000000000..0f30faa76 --- /dev/null +++ b/src/weathergen/datasets/data_reader_seviri.py @@ -0,0 +1,244 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
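+
+"""Data reader for SEVIRI land surface temperature (LST) streams stored as zarr."""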
+
+import logging
+from pathlib import Path
+from typing import override
+
+import numpy as np
+import pandas as pd  # for reading the parquet statistics files
+import xarray as xr
+from numpy.typing import NDArray
+
+from weathergen.datasets.data_reader_base import (
+    DataReaderTimestep,
+    ReaderData,
+    TimeWindowHandler,
+    TIndex,
+    check_reader_data,
+)
+
+_logger = logging.getLogger(__name__)
+
+
+class DataReaderSeviri(DataReaderTimestep):
+    """Data reader for SEVIRI satellite data."""
+
+    def __init__(
+        self,
+        tw_handler: TimeWindowHandler,
+        filename: Path,
+        stream_info: dict,
+    ) -> None:
+        """Initialize the SEVIRI data reader."""
+        np32 = np.float32
+
+        # the time index and the data live in separate zarr groups
+        time_ds = xr.open_zarr(filename, group="era5")
+        ds = xr.open_zarr(filename, group="seviri")
+
+        _logger.debug("Max time: %s", time_ds.time.max().values)
+
+        # check if the data overlaps with the time window, otherwise initialise as empty datareader
+        if tw_handler.t_start >= time_ds.time.max() or tw_handler.t_end <= time_ds.time.min():
+            name = stream_info["name"]
+            _logger.warning(f"{name} is not supported over data loader window. Stream is skipped.")
+            super().__init__(tw_handler, stream_info)
+            self.init_empty()
+            return
+
+        if "frequency" in stream_info:
+            assert False, "Frequency sub-sampling currently not supported"
+
+        # determine the time range covered by the dataset
+        # TODO: the end time is taken from time step 20 rather than the last step; verify intent
+        data_start_time = time_ds.time[0].values
+        data_end_time = time_ds.time[20].values
+
+        period = data_end_time - data_start_time
+
+        assert data_start_time is not None and data_end_time is not None, (
+            data_start_time,
+            data_end_time,
+        )
+
+        # sets the time window handler and stream info in the base class
+        super().__init__(
+            tw_handler,
+            stream_info,
+            data_start_time,
+            data_end_time,
+            period,
+        )
+
+        # If there is no overlap with the time range, no need to keep the dataset.
+        if tw_handler.t_start >= data_end_time or tw_handler.t_end <= data_start_time:
+            self.init_empty()
+            return
+        else:
+            self.ds = ds
+            self.len = len(ds)
+
+        self.channels_file = list(self.ds.keys())
+
+        # caches lats and lons
+        lat_name = stream_info.get("latitude_name", "latitude")
+        self.latitudes = _clip_lat(np.array(ds[lat_name], dtype=np32))
+        lon_name = stream_info.get("longitude_name", "longitude")
+        self.longitudes = _clip_lon(np.array(ds[lon_name], dtype=np32))
+
+        self.geoinfo_channels = stream_info.get("geoinfos", [])
+        self.geoinfo_idx = [self.channels_file.index(ch) for ch in self.geoinfo_channels]
+        # cache geoinfos
+        self.geoinfo_data = np.stack([np.array(ds[ch], dtype=np32) for ch in self.geoinfo_channels])
+        self.geoinfo_data = self.geoinfo_data.transpose()
+
+        # select/filter requested target channels
+        # this will access the stream info, hence make sure to set it.
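+        # select_channels maps the channel names configured in the stream info onto
+        # sorted column indices into channels_file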
+        self.target_idx = self.select_channels(ds, "target")
+        self.target_channels = [self.channels_file[i] for i in self.target_idx]
+
+        self.source_idx = self.select_channels(ds, "source")
+        self.source_channels = [self.channels_file[i] for i in self.source_idx]
+
+        ds_name = stream_info["name"]
+        _logger.info(f"{ds_name}: target channels: {self.target_channels}")
+
+        self.properties = {
+            "stream_id": 0,
+        }
+
+        # load the pre-computed per-channel statistics
+        self._create_statistics_lookup()
+
+        self.mean = self.mean_lookup[self.target_channels].values.astype(np32)
+        self.stdev = self.std_lookup[self.target_channels].values.astype(np32)
+
+        self.mean_geoinfo = self.mean_lookup[self.geoinfo_channels].values.astype(np32)
+        self.stdev_geoinfo = self.std_lookup[self.geoinfo_channels].values.astype(np32)
+
+    def _create_statistics_lookup(self):
+        statistics = (
+            Path(self.stream_info["metadata"])
+            / self.stream_info["experiment"]
+            / "seviri_statistics.parquet"
+        )
+        df_stats = pd.read_parquet(statistics)
+        self.mean_lookup = df_stats.set_index("variable")["mean"]
+        self.std_lookup = df_stats.set_index("variable")["std"]
+
+    @override
+    def init_empty(self) -> None:
+        super().init_empty()
+        self.ds = None
+        self.len = 0
+
+    @override
+    def length(self) -> int:
+        return self.len
+
+    @override
+    def _get(self, idx: TIndex, channels_idx: NDArray) -> ReaderData:
+        """
+        Get data for window (for either source or target, through public interface)
+
+        Parameters
+        ----------
+        idx : int
+            Index of temporal window
+        channels_idx : np.array
+            Selection of channels
+
+        Returns
+        -------
+        ReaderData providing coords, geoinfos, data, datetimes
+        """
+
+        (t_idxs, dtr) = self._get_dataset_idxs(idx)
+
+        if self.ds is None or self.len == 0 or len(t_idxs) == 0:
+            return ReaderData.empty(
+                num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx)
+            )
+
+        assert t_idxs[0] >= 0, "index must be non-negative"
+        didx_start = t_idxs[0]
+        # End is inclusive
+        didx_end = t_idxs[-1] + 1
+
+        # extract number of time steps and collapse ensemble dimension
+        # ds is a wrapper around zarr with get_coordinate_selection not being exposed since
+        # subsetting is pushed to the ctor via frequency argument; this also ensures that no sub-
+        # sampling is required here
+        sel_channels = [self.channels_file[i] for i in channels_idx]
+        data = self.ds[sel_channels].isel(time=slice(didx_start, didx_end)).to_array().values
+        # flatten along time dimension
+        data = data.transpose([1, 2, 0]).reshape((data.shape[1] * data.shape[2], data.shape[0]))
+        # set invalid values to NaN (fillvalue is expected to be provided by the base class)
+        mask = data == self.fillvalue
+        data[mask] = np.nan
+
+        # construct lat/lon coords
+        latlon = np.concatenate(
+            [
+                np.expand_dims(self.latitudes, 0),
+                np.expand_dims(self.longitudes, 0),
+            ],
+            axis=0,
+        ).transpose()
+
+        # repeat len(t_idxs) times
+        coords = np.vstack((latlon,) * len(t_idxs))
+        geoinfos = np.vstack((self.geoinfo_data,) * len(t_idxs))
+
+        # date time matching #data points of data
+        datetimes = np.repeat(self.ds.time[didx_start:didx_end].values, len(data) // len(t_idxs))
+
+        rd = ReaderData(
+            coords=coords,
+            geoinfos=geoinfos,
+            data=data,
+            datetimes=datetimes,
+        )
+        check_reader_data(rd, dtr)
+
+        return rd
+
+    def select_channels(self, ds, ch_type: str) -> NDArray[np.int64]:
+        """Select channels based on stream info for either source or target."""
+        channels = self.stream_info.get(ch_type)
+        assert channels is not None, f"{ch_type} channels need to be specified"
+        # sanity check: channels is known to be non-None after the assert
+        is_empty = len(channels) == 0
+        if is_empty:
+            
stream_name = self.stream_info["name"] + _logger.warning(f"No channel for {stream_name} for {ch_type}.") + + chs_idx = np.sort([self.channels_file.index(ch) for ch in channels]) + + return np.array(chs_idx) + + +def _clip_lat(lats: NDArray) -> NDArray[np.float32]: + """ + Clip latitudes to the range [-90, 90] and ensure periodicity. + """ + return (2 * np.clip(lats, -90.0, 90.0) - lats).astype(np.float32) + + +# TODO: move to base class +def _clip_lon(lons: NDArray) -> NDArray[np.float32]: + """ + Clip longitudes to the range [-180, 180] and ensure periodicity. + """ + return ((lons + 180.0) % 360.0 - 180.0).astype(np.float32) \ No newline at end of file diff --git a/src/weathergen/datasets/multi_stream_data_sampler.py b/src/weathergen/datasets/multi_stream_data_sampler.py index e38d518da..c1fc0ff05 100644 --- a/src/weathergen/datasets/multi_stream_data_sampler.py +++ b/src/weathergen/datasets/multi_stream_data_sampler.py @@ -22,6 +22,7 @@ str_to_datetime64, ) from weathergen.datasets.data_reader_fesom import DataReaderFesom +from weathergen.datasets.data_reader_seviri import DataReaderSeviri from weathergen.datasets.data_reader_obs import DataReaderObs from weathergen.datasets.masking import Masker from weathergen.datasets.stream_data import StreamData, spoof @@ -145,6 +146,9 @@ def __init__( case "fesom": dataset = DataReaderFesom datapath = cf.data_path_fesom + case "msg_lst": + dataset = DataReaderSeviri + datapath = cf.data_path_obs case type_name: reader_entry = get_extra_reader(type_name, cf) if reader_entry is not None: From b8a19e0933dcfcc875a6ca83b670135e41129dda Mon Sep 17 00:00:00 2001 From: iluise <72020169+iluise@users.noreply.github.com> Date: Tue, 9 Dec 2025 09:17:50 +0100 Subject: [PATCH 11/24] update evaluation package in main branch (#1430) * cherry pick evaluation * add evaluation 1.0 --- config/evaluate/eval_config.yml | 7 + integration_tests/small1_test.py | 2 +- packages/common/src/weathergen/common/io.py | 2 +- packages/evaluate/README.md | 73 +++ packages/evaluate/pyproject.toml | 4 +- .../weathergen/evaluate/export/cf_utils.py | 26 + .../weathergen/evaluate/export/export_core.py | 2 + .../evaluate/export/export_inference.py | 22 +- .../weathergen/evaluate/export/io_utils.py | 3 + .../evaluate/export/parsers/netcdf_parser.py | 161 +++--- .../evaluate/export/parsers/quaver_parser.py | 3 +- .../src/weathergen/evaluate/export/reshape.py | 466 +++++++++++++++- .../src/weathergen/evaluate/io/csv_reader.py | 162 ++++++ .../src/weathergen/evaluate/io/io_reader.py | 314 +++++++++++ .../{io_reader.py => io/wegen_reader.py} | 501 ++---------------- .../evaluate/{ => plotting}/plot_utils.py | 26 +- .../evaluate/{ => plotting}/plotter.py | 299 +++++++---- .../src/weathergen/evaluate/run_evaluation.py | 335 ++++++++---- .../weathergen/evaluate/{ => scores}/score.py | 9 +- .../weathergen/evaluate/scores/score_utils.py | 38 ++ .../evaluate/{ => utils}/clim_utils.py | 0 .../evaluate/{ => utils}/derived_channels.py | 0 .../{score_utils.py => utils/regions.py} | 24 +- .../weathergen/evaluate/{ => utils}/utils.py | 207 ++++---- 24 files changed, 1767 insertions(+), 919 deletions(-) create mode 100644 packages/evaluate/README.md create mode 100644 packages/evaluate/src/weathergen/evaluate/io/csv_reader.py create mode 100644 packages/evaluate/src/weathergen/evaluate/io/io_reader.py rename packages/evaluate/src/weathergen/evaluate/{io_reader.py => io/wegen_reader.py} (51%) rename packages/evaluate/src/weathergen/evaluate/{ => plotting}/plot_utils.py (89%) rename 
packages/evaluate/src/weathergen/evaluate/{ => plotting}/plotter.py (84%) rename packages/evaluate/src/weathergen/evaluate/{ => scores}/score.py (99%) create mode 100644 packages/evaluate/src/weathergen/evaluate/scores/score_utils.py rename packages/evaluate/src/weathergen/evaluate/{ => utils}/clim_utils.py (100%) rename packages/evaluate/src/weathergen/evaluate/{ => utils}/derived_channels.py (100%) rename packages/evaluate/src/weathergen/evaluate/{score_utils.py => utils/regions.py} (86%) rename packages/evaluate/src/weathergen/evaluate/{ => utils}/utils.py (78%) diff --git a/config/evaluate/eval_config.yml b/config/evaluate/eval_config.yml index 85157728d..cf8d3ce5a 100644 --- a/config/evaluate/eval_config.yml +++ b/config/evaluate/eval_config.yml @@ -3,6 +3,7 @@ #global_plotting_options: # image_format : "png" #options: "png", "pdf", "svg", "eps", "jpg" .. # dpi_val : 300 +# fps: 2 # ERA5: # marker_size: 2 # scale_marker_size: 1 @@ -26,6 +27,9 @@ evaluation: log_scale: false add_grid: false score_cards: false + bar_plots: false + num_processes: 0 #options: int, "auto", 0 means no parallelism (default) + # baseline: "ar40mckx" run_ids : ar40mckx: @@ -45,6 +49,7 @@ run_ids : forecast_step: [1,3, 2] ensemble: [0,2,5] #supported: "all", "mean", [0,1,2] plot_maps: true + plot_target: false plot_histograms: true plot_animations: true CERRA: @@ -56,6 +61,7 @@ run_ids : sample: [2, 3, 0] forecast_step: [1,3, 4, 5] plot_maps: true + plot_target: false plot_histograms: true plot_animations: true @@ -76,5 +82,6 @@ run_ids : forecast_step: [1,3, 2] ensemble: "mean" plot_maps: true + plot_target: false plot_histograms: true plot_animations: true \ No newline at end of file diff --git a/integration_tests/small1_test.py b/integration_tests/small1_test.py index 6a7d398ef..c6d6aba5b 100644 --- a/integration_tests/small1_test.py +++ b/integration_tests/small1_test.py @@ -135,7 +135,7 @@ def evaluate_results(run_id): } ) # Not passing the mlflow client for tests. - evaluate_from_config(cfg, None) + evaluate_from_config(cfg, None, None) def load_metrics(run_id): diff --git a/packages/common/src/weathergen/common/io.py b/packages/common/src/weathergen/common/io.py index a95419b1c..0aa11fea1 100644 --- a/packages/common/src/weathergen/common/io.py +++ b/packages/common/src/weathergen/common/io.py @@ -433,7 +433,7 @@ def forecast_steps(self) -> list[int]: _, example_sample = next(self.data_root.groups()) _, example_stream = next(example_sample.groups()) - all_steps = list(example_stream.group_keys()) + all_steps = sorted(list(example_stream.group_keys())) if self.forecast_offset == 1: return all_steps[1:] # exclude fstep with no targets/preds else: diff --git a/packages/evaluate/README.md b/packages/evaluate/README.md new file mode 100644 index 000000000..35a06cf80 --- /dev/null +++ b/packages/evaluate/README.md @@ -0,0 +1,73 @@ +# The `WeatherGenerator-FastEvaluation` package + +A modular evaluation and visualization package for verifying forecast data and model outputs in the WeatherGenerator project. This package reads the output of the inference stage from the WeatherGenerator and provides statistics or exports to other tools. + + +--- + +## Overview + +The `WeatherGenerator-FastEvaluation` tool is designed for rapid diagnostics of WeatherGenerator model outputs, in contrast to the evaluation frameworks used in national meteorological services. 
+
+The key motivations behind this package are:
+
+- **Rapid turnaround**: Enables fast scoring and plotting directly from WeatherGenerator output, supporting short development cycles without waiting for operational verification pipelines.
+- **No format conversion**: Works directly on native model outputs (e.g., Zarr). Avoids costly GRIB/netCDF conversion steps required by traditional tools, significantly reducing preprocessing time.
+- **Stream-level analysis only**: Provides evaluation against existing WeatherGenerator streams.
+
+Because the scope is restricted to developer-oriented diagnostics, the evaluation workflow remains lightweight and integrated into model development.
+
+---
+
+#### Features
+
+The `WeatherGenerator-FastEvaluation` module has the following features:
+
+- compute performance metrics and diagnostics for forecast or model outputs
+- produce maps, time‑series, and other visualizations for qualitative & quantitative evaluation
+- handle gridded and non-gridded data (observations / reanalysis)
+- export the WeatherGenerator output into grib/netCDF files suitable to be used by the project partners.
+
+
+---
+
+## Input
+
+The `WeatherGenerator-FastEvaluation` package supports multiple inputs:
+- The WeatherGenerator `.zarr` files generated by the inference step of the WeatherGenerator chain. It reads both `Target` and `Prediction` from local files (`WeatherGenReader`).
+- CSV files with pre-computed scores for an easy comparison with the other models. The scores should be stored as:
+
+````
+,parameter,level,number,score,step,date,domain_name,value
+0,t,925,0,mef,0 days 12:00:00,2022-10-01 00:00:00,n.hem,0.031371469251538386
+1,t,925,0,mef,0 days 12:00:00,2022-10-01 12:00:00,n.hem,-0.010387031341104752
+2,t,925,0,mef,0 days 12:00:00,2022-10-02 00:00:00,n.hem,0.030255780718550083
+3,t,925,0,mef,0 days 12:00:00,2022-10-02 12:00:00,n.hem,-0.028894746338016246
+````
+
+The structure is flexible and new readers can be added on demand.
+
+---
+
+## Stability and documentation
+The package is functionally stable, and its core evaluation workflows are used in active development.
+However, documentation is currently internal: design notes, detailed examples, and workflow guides remain private and will be opened gradually as they are consolidated.
+
+## Quick Start — Running the Evaluation Workflow
+
+After the inference step you can run evaluation (on CPUs) as:
+```
+uv run evaluate --config <path-to-config>
+```
+
+The default config file is at: `WeatherGenerator/configs/evaluate/eval_config.yml`
+
+More instructions can be found here: https://gitlab.jsc.fz-juelich.de/esde/WeatherGenerator-private/-/wikis/home/Common-workflows/fast-evaluation
+
+---
+
+## Licence
+This package is licensed under the Apache‑2.0 License.
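+
+## Appendix: loading pre-computed scores
+
+A minimal sketch of how a score CSV in the format shown under Input could be loaded and aggregated. This is illustrative only: `scores.csv` is a placeholder file name, and pandas is assumed to be available in the environment.
+
+```python
+import pandas as pd
+
+# The first (unnamed) column is an index; "date" holds timestamps, "step" lead times.
+df = pd.read_csv("scores.csv", index_col=0, parse_dates=["date"])
+df["step"] = pd.to_timedelta(df["step"])
+
+# Average one score over all forecast dates, per lead time.
+sel = df[
+    (df.parameter == "t")
+    & (df.level == 925)
+    & (df.score == "mef")
+    & (df.domain_name == "n.hem")
+]
+print(sel.groupby("step")["value"].mean())
+```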
+ + + diff --git a/packages/evaluate/pyproject.toml b/packages/evaluate/pyproject.toml index 862358e5d..224f85881 100644 --- a/packages/evaluate/pyproject.toml +++ b/packages/evaluate/pyproject.toml @@ -1,8 +1,8 @@ [project] name = "weathergen-evaluate" -version = "0.1.0" +version = "1.0" description = "The WeatherGenerator Machine Learning Earth System Model" -readme = "../../README.md" +readme = "./README.md" requires-python = ">=3.12,<3.13" dependencies = [ "cartopy>=0.24.1", diff --git a/packages/evaluate/src/weathergen/evaluate/export/cf_utils.py b/packages/evaluate/src/weathergen/evaluate/export/cf_utils.py index e905015fe..201ffa168 100644 --- a/packages/evaluate/src/weathergen/evaluate/export/cf_utils.py +++ b/packages/evaluate/src/weathergen/evaluate/export/cf_utils.py @@ -2,6 +2,7 @@ from pathlib import Path import numpy as np +import xarray as xr _logger = logging.getLogger(__name__) _logger.setLevel(logging.INFO) @@ -29,6 +30,7 @@ def __init__(self, config, **kwargs): self.config = config self.file_extension = _get_file_extension(self.output_format) self.fstep_hours = np.timedelta64(self.fstep_hours, "h") + self.mapping = config.get("variables", {}) def get_output_filename(self) -> Path: """ @@ -49,6 +51,30 @@ def process_sample(self, fstep_iterator_results: iter, ref_time: np.datetime64): """ pass + def scale_data(self, data: xr.DataArray, var_short: str) -> xr.DataArray: + """ + Scale data based on variable configuration. + Parameters + ---------- + data : xr.DataArray + Input data array. + var_short : str + Variable name. + Returns + ------- + xr.DataArray + Scaled data array. + """ + var_config = self.mapping.get(var_short, {}) + raw = var_config.get("scale_factor", "1.0") + parts = raw.split("/") + scale_factor = float(parts[0]) / float(parts[1]) if len(parts) == 2 else float(parts[0]) + + add_offset = var_config.get("add_offset", 0.0) + + scaled_data = data * scale_factor + add_offset + return scaled_data + ########################################## diff --git a/packages/evaluate/src/weathergen/evaluate/export/export_core.py b/packages/evaluate/src/weathergen/evaluate/export/export_core.py index a33c1561f..c51a999e3 100644 --- a/packages/evaluate/src/weathergen/evaluate/export/export_core.py +++ b/packages/evaluate/src/weathergen/evaluate/export/export_core.py @@ -204,6 +204,8 @@ def export_model_outputs(data_type: str, config: OmegaConf, **kwargs) -> None: Epoch number to identify the Zarr store. rank : int Rank number to identify the Zarr store. + regrid_degree : float + If specified, regrid the data to a regular lat/lon grid with the given degree output_dir : str Directory to save the NetCDF files. 
output_format : str diff --git a/packages/evaluate/src/weathergen/evaluate/export/export_inference.py b/packages/evaluate/src/weathergen/evaluate/export/export_inference.py index 139375df4..47ad677a6 100755 --- a/packages/evaluate/src/weathergen/evaluate/export/export_inference.py +++ b/packages/evaluate/src/weathergen/evaluate/export/export_inference.py @@ -10,9 +10,9 @@ # weathergen-common = { path = "../../../../../packages/common" } # weathergen = { path = "../../../../../" } # /// -## Example USAGE: uv run export --run-id grwnhykd --stream ERA5 \ -## --output-dir /p/home/jusers/owens1/jureca/WeatherGen/test_output1 \ -## --format netcdf --type prediction target --fsteps 1 --samples 1 +## Example USAGE: uv run export --run-id o8b60tgh --stream ERA5 +# --output-dir ../o8b60tgh --format netcdf +# --regrid-degree 0.25 --regrid-type regular_ll import argparse import logging import sys @@ -156,6 +156,22 @@ def parse_args(args: list) -> argparse.Namespace: required=False, ) + parser.add_argument( + "--regrid-degree", + type=float, + default=None, + help="""If specified, regrid the data to a regular lat/lon grid with the given degree, + (e.g., 0.25 for 0.25x0.25 degree grid) or O/N Gaussian grid (e.g., 63 for N63 grid).""", + ) + + parser.add_argument( + "--regrid-type", + type=str, + choices=["regular_ll", "O", "N"], + default=None, + help="Type of grid to regrid to (only used if --regrid-degree is specified)", + ) + args, unknown_args = parser.parse_known_args(args) if unknown_args: _logger.warning(f"Unknown arguments: {unknown_args}") diff --git a/packages/evaluate/src/weathergen/evaluate/export/io_utils.py b/packages/evaluate/src/weathergen/evaluate/export/io_utils.py index 98cdbb04d..c7b03fe68 100644 --- a/packages/evaluate/src/weathergen/evaluate/export/io_utils.py +++ b/packages/evaluate/src/weathergen/evaluate/export/io_utils.py @@ -17,6 +17,7 @@ def output_filename( output_dir: str, output_format: str, forecast_ref_time: np.datetime64, + regrid_degree: float, ) -> Path: """ Generate output filename based on prefix (should refer to type e.g. 
pred/targ), run_id, sample
@@ -40,6 +41,8 @@ def output_filename(
     )
     file_extension = "nc"
     frt = np.datetime_as_string(forecast_ref_time, unit="h")
+    if regrid_degree is not None:
+        run_id += f"_regular{regrid_degree}x{regrid_degree}"
     out_fname = Path(output_dir) / f"{prefix}_{frt}_{run_id}.{file_extension}"
 
     return out_fname
diff --git a/packages/evaluate/src/weathergen/evaluate/export/parsers/netcdf_parser.py b/packages/evaluate/src/weathergen/evaluate/export/parsers/netcdf_parser.py
index fa58d90b7..fe7655fbe 100644
--- a/packages/evaluate/src/weathergen/evaluate/export/parsers/netcdf_parser.py
+++ b/packages/evaluate/src/weathergen/evaluate/export/parsers/netcdf_parser.py
@@ -7,7 +7,7 @@
 from omegaconf import OmegaConf
 
 from weathergen.evaluate.export.cf_utils import CfParser
-from weathergen.evaluate.export.reshape import find_pl
+from weathergen.evaluate.export.reshape import Regridder, find_pl
 
 _logger = logging.getLogger(__name__)
 _logger.setLevel(logging.INFO)
@@ -80,9 +80,11 @@ def process_sample(
 
         if da_fs:
             da_fs = self.concatenate(da_fs)
-            da_fs = self.assign_coords(da_fs, ref_time)
+            da_fs = self.assign_frt(da_fs, ref_time)
             da_fs = self.add_attrs(da_fs)
             da_fs = self.add_metadata(da_fs)
+            da_fs = self.add_encoding(da_fs)
+            da_fs = self.regrid(da_fs)
             self.save(da_fs, ref_time)
 
     def get_output_filename(self, forecast_ref_time: np.datetime64) -> Path:
@@ -160,6 +162,26 @@ def reshape(self, data: xr.DataArray) -> xr.Dataset:
 
         return reshaped_dataset
 
+    def regrid(self, ds: xr.Dataset) -> xr.Dataset:
+        """
+        Regrid a single xarray Dataset to the grid type and degree configured on the parser.
+        Parameters
+        ----------
+        ds : Input xarray Dataset on the native grid. The target grid is taken from
+            self.regrid_type (e.g., 'regular_ll') and self.regrid_degree (lat/lon spacing
+            for regular grids, or the N number, e.g., 63 for N63, for Gaussian grids).
+        Returns
+        -------
+        Regridded xarray Dataset.
+        """
+        if self.regrid_degree is None or self.regrid_type is None:
+            _logger.info("No regridding specified, skipping regridding step.")
+            return ds
+        nc_regridder = Regridder(ds, output_grid_type=self.regrid_type, degree=self.regrid_degree)
+
+        regrid_ds = nc_regridder.regrid_ds()
+        return regrid_ds
+
     def concatenate(
         self,
         array_list,
@@ -209,7 +231,7 @@ def concatenate(
 
         return data
 
-    def assign_coords(self, ds: xr.Dataset, reference_time: np.datetime64) -> xr.Dataset:
+    def assign_frt(self, ds: xr.Dataset, reference_time: np.datetime64) -> xr.Dataset:
         """
         Assign forecast reference time coordinate to the dataset.
 
@@ -222,14 +244,13 @@ def assign_coords(self, ds: xr.Dataset, reference_time: np.datetime64) -> xr.Dat
         -------
         xarray Dataset with assigned forecast reference time coordinate.
         """
-        ds = ds.assign_coords(forecast_ref_time=reference_time)
+        ds = ds.assign_coords(forecast_reference_time=reference_time)
 
         if "sample" in ds.coords:
             ds = ds.drop_vars("sample")
 
         n_hours = self.fstep_hours.astype("int64")
-        ds["forecast_period"] = ds["forecast_step"] * n_hours
-
+        ds["forecast_step"] = ds["forecast_step"] * n_hours
         return ds
 
     def add_attrs(self, ds: xr.Dataset) -> xr.Dataset:
@@ -244,12 +265,6 @@ def add_attrs(self, ds: xr.Dataset) -> xr.Dataset:
         xarray Dataset with CF-compliant variable attributes.
""" - ds["forecast_period"].attrs = { - "standard_name": "forecast_period", - "long_name": "time since forecast_reference_time", - "units": "hours", - } - if self.grid_type == "gaussian": variables = self._attrs_gaussian_grid(ds) else: @@ -257,9 +272,36 @@ def add_attrs(self, ds: xr.Dataset) -> xr.Dataset: dataset = xr.merge(variables.values()) dataset.attrs = ds.attrs - return dataset + def add_encoding(self, ds: xr.Dataset) -> xr.Dataset: + """ + Add time encoding to the dataset variables. + Add aux coordinates to forecast_period + + Parameters + ---------- + ds : xarray Dataset to add time encoding to. + Returns + ------- + xarray Dataset with time encoding added. + """ + time_encoding = { + "units": "hours since 1970-01-01 00:00:00", + "calendar": "gregorian", + } + + if "valid_time" in ds.coords: + ds["valid_time"].encoding.update(time_encoding) + + if "forecast_reference_time" in ds.coords: + ds["forecast_reference_time"].encoding.update(time_encoding) + + if "forecast_period" in ds.coords: + ds["forecast_period"].encoding.update({"coordinates": "forecast_reference_time"}) + + return ds + def _attrs_gaussian_grid(self, ds: xr.Dataset) -> xr.Dataset: """ Assign CF-compliant attributes to variables in a Gaussian grid dataset. @@ -273,30 +315,28 @@ def _attrs_gaussian_grid(self, ds: xr.Dataset) -> xr.Dataset: Dataset with CF-compliant variable attributes. """ variables = {} - + dims_cfg = self.config.get("dimensions", {}) + ds, ds_attrs = self._assign_dim_attrs(ds, dims_cfg) for var_name, da in ds.data_vars.items(): - if var_name in ["lat", "lon"]: - continue - mapped_info = self.mapping.get(var_name, {}) mapped_name = mapped_info.get("var", var_name) + coords = self._build_coordinate_mapping(ds, mapped_info, ds_attrs) + attributes = { "standard_name": mapped_info.get("std", var_name), "units": mapped_info.get("std_unit", "unknown"), - "coordinates": "lat lon", } - + if "long" in mapped_info: + attributes["long_name"] = mapped_info["long"] variables[mapped_name] = xr.DataArray( data=da.values, - dims=list(da.dims), - coords={coord: ds.coords[coord] for coord in da.coords if coord in ds.coords}, + dims=da.dims, + coords=coords, attrs=attributes, name=mapped_name, ) - self._assign_latlon_attrs(ds) - return variables def _attrs_regular_grid(self, ds: xr.Dataset) -> xr.Dataset: @@ -304,7 +344,6 @@ def _attrs_regular_grid(self, ds: xr.Dataset) -> xr.Dataset: Assign CF-compliant attributes to variables in a regular grid dataset. Parameters ---------- - ds : xr.Dataset Input dataset. Returns @@ -313,67 +352,43 @@ def _attrs_regular_grid(self, ds: xr.Dataset) -> xr.Dataset: Dataset with CF-compliant variable attributes. 
""" variables = {} - dims = self.config.get("dimensions", {}) - ds_attrs = self._assign_dim_attrs(ds, dims) - mapping = self.mapping - + dims_cfg = self.config.get("dimensions", {}) + ds, ds_attrs = self._assign_dim_attrs(ds, dims_cfg) + dims_list = ["pressure", "latitude", "longitude", "valid_time"] for var_name, da in ds.data_vars.items(): - var_cfg = mapping.get(var_name) - if var_cfg is None: - continue - - dims = ["pressure", "valid_time", "latitude", "longitude"] - if var_cfg.get("level_type") == "sfc": + mapped_info = self.mapping.get(var_name, {}) + mapped_name = mapped_info.get("var", var_name) + dims = dims_list.copy() + if mapped_info.get("level_type") == "sfc": dims.remove("pressure") - coords = self._build_coordinate_mapping(ds, var_cfg, ds_attrs) + coords = self._build_coordinate_mapping(ds, mapped_info, ds_attrs) - attrs = { - "standard_name": var_cfg.get("std", var_name), - "units": var_cfg.get("std_unit", "unknown"), + attributes = { + "standard_name": mapped_info.get("std", var_name), + "units": mapped_info.get("std_unit", "unknown"), } - - mapped_name = var_cfg.get("var", var_name) + if "long" in mapped_info: + attributes["long_name"] = mapped_info["long"] variables[mapped_name] = xr.DataArray( data=da.values, dims=dims, coords={**coords, "valid_time": ds["valid_time"].values}, - attrs=attrs, + attrs=attributes, name=mapped_name, ) + if da.encoding.get("coordinates"): + variables[mapped_name].encoding["coordinates"] = ( + da.encoding["coordinates"] + .replace(" lat ", " latitude ") + .replace(" lon ", " longitude "), + ) return variables - def _assign_latlon_attrs(self, ds: xr.Dataset) -> None: - """Add CF-compliant attributes to lat/lon coordinates if they exist. - Parameters - ---------- - ds : xr.Dataset - Input dataset. - Returns - ------- - None - """ - if "lat" in ds.coords: - ds.coords["lat"].attrs.update( - { - "standard_name": "latitude", - "long_name": "latitude", - "units": "degrees_north", - } - ) - if "lon" in ds.coords: - ds.coords["lon"].attrs.update( - { - "standard_name": "longitude", - "long_name": "longitude", - "units": "degrees_east", - } - ) - def _assign_dim_attrs( self, ds: xr.Dataset, dim_cfg: dict[str, Any] - ) -> dict[str, dict[str, str]]: + ) -> tuple[xr.Dataset, dict[str, dict[str, str]]]: """ Assign CF attributes from given config file. Parameters @@ -386,6 +401,8 @@ def _assign_dim_attrs( ------- Dict[str, Dict[str, str]]: Attributes for each dimension. + xr.Dataset: + Dataset with renamed dimensions. 
""" ds_attrs = {} @@ -397,9 +414,11 @@ def _assign_dim_attrs( dim_attrs = {"standard_name": meta.get("std", wg_name)} if meta.get("std_unit"): dim_attrs["units"] = meta["std_unit"] + if meta.get("long"): + dim_attrs["long_name"] = meta["long"] ds_attrs[wg_name] = dim_attrs - return ds_attrs + return ds, ds_attrs def _build_coordinate_mapping( self, ds: xr.Dataset, var_cfg: dict[str, Any], attrs: dict[str, dict[str, str]] @@ -485,6 +504,8 @@ def add_metadata(self, ds: xr.Dataset) -> xr.Dataset: + np.datetime_as_string(np.datetime64("now"), unit="s") ) ds.attrs["Conventions"] = "CF-1.12" + # drop stream now it's in title + ds = ds.drop_vars("stream") return ds def save(self, ds: xr.Dataset, forecast_ref_time: np.datetime64) -> None: diff --git a/packages/evaluate/src/weathergen/evaluate/export/parsers/quaver_parser.py b/packages/evaluate/src/weathergen/evaluate/export/parsers/quaver_parser.py index d54aad4e4..198affafa 100644 --- a/packages/evaluate/src/weathergen/evaluate/export/parsers/quaver_parser.py +++ b/packages/evaluate/src/weathergen/evaluate/export/parsers/quaver_parser.py @@ -55,8 +55,6 @@ def __init__(self, config: OmegaConf, **kwargs): self.pl_file = ekd.create_target("file", self.get_output_filename("pl")) self.sf_file = ekd.create_target("file", self.get_output_filename("sfc")) - self.mapping = config.get("variables", {}) - self.template_cache = self.cache_templates() def process_sample( @@ -96,6 +94,7 @@ def process_sample( _logger.info(f"[Worker] Encoding var={var}, level={level}") field_data = da_fs.sel(channel=var) + field_data = self.scale_data(field_data, var) template_field = self.template_cache.get((var, level), None) if template_field is None: _logger.error(f"Template for var={var}, level={level} not found. Skipping.") diff --git a/packages/evaluate/src/weathergen/evaluate/export/reshape.py b/packages/evaluate/src/weathergen/evaluate/export/reshape.py index 67e7385ed..3c2b5cd30 100644 --- a/packages/evaluate/src/weathergen/evaluate/export/reshape.py +++ b/packages/evaluate/src/weathergen/evaluate/export/reshape.py @@ -1,8 +1,11 @@ +import contextlib import logging import re +from itertools import product import numpy as np import xarray as xr +from earthkit.regrid import interpolate _logger = logging.getLogger(__name__) _logger.setLevel(logging.INFO) @@ -75,5 +78,466 @@ def find_pl(vars: list) -> tuple[dict[str, list[str]], list[int]]: var_dict.setdefault(var_name, []).append(var) else: var_dict.setdefault(var, []).append(var) - pl = list(set(pl)) + pl = sorted(set(pl)) return var_dict, pl + + +class Regridder: + """ + Class to handle regridding of xarray Datasets using earthkit regrid options available. + """ + + def __init__(self, ds, output_grid_type: str, degree: float): + self.output_grid_type = output_grid_type + self.degree = degree + self.dataset = ds + self.indices = self.find_lat_lon_ordering() # to store lat/lon ordering indices + + self.earthkit_input: str = "" + self.earthkit_output: str = "" + self.grid_shape: tuple[int] = [] + self.input_grid_type: str = "" + + def find_lat_lon_ordering(self) -> list[int]: + """ + Find all the the latitude and longitude ordering for CF-parsed WeatherGenerator data + Ordering from North West to South East. + Returns the indices required to reorder the data. + Returns + ------- + indices: list of indices to reorder the data from original to lat/lon ordered. 
+ """ + ds = self.dataset + x = ds["longitude"].values[:, 0] + y = ds["latitude"].values[:, 0] + tuples = list(zip(x, y, strict=False)) + ordered_tuples = sorted(tuples, key=lambda t: (-t[1], t[0])) + indices = [tuples.index(t) for t in ordered_tuples] + return indices + + def detect_input_grid_type(self) -> str: + """ + Detect whether data is on a regular lat/lon grid or Gaussian grid. + Returns + ------- + str + String with the grid type. + Supported options at the moment: "regular", "gaussian" + """ + data = self.dataset + # check dataset attributes first + if "grid_type" in data.attrs: + return data.attrs["grid_type"] + elif "ncells" in data.dims: + return "gaussian" + elif "latitude" in data.coords and "longitude" in data.coords: # skeptical- check! + return "regular_ll" + else: + raise ValueError("Unable to detect grid type from data attributes or dimensions.") + + def define_earthkit_input(self): + """ + Define the input grid type for earthkit regrid based on detected input grid type.""" + ds = self.dataset + if self.input_grid_type == "gaussian": + # fix all other indices except ncells + lat_ds_dims = len(ds["latitude"].shape) + pos = ds["latitude"].dims.index("ncells") + selected_indices = np.zeros(lat_ds_dims, dtype=int).tolist() + selected_indices[pos] = slice(None) + lat_ds = ds["latitude"].values[tuple(selected_indices)] + + # find type of Gaussian grid + n_lats = len(set(lat_ds)) // 2 ## UNEXPECTED LOGIC + num_cells = len(ds["ncells"]) + if num_cells == 4 * n_lats**2: + return f"N{n_lats}" + else: + return f"O{n_lats}" + _logger.info(f"Detected Gaussian grid type: {self.earthkit_input}") + if self.input_grid_type == "regular_ll": + ## Needs to be tested properly when there are regular grids + _logger.warning("Regular lat/lon grid input detection not fully tested yet.") + n_lats = len(ds["latitude"].shape) + degree = int(180 / (n_lats - 1)) + return [degree, degree] + + def define_earthkit_output(self): + """ + Define the output grid type and shape based on desired output grid type and degree. + Returns + ------- + output_grid_type : str + Type of grid to regrid to (e.g., 'regular_ll'). + grid_shape : list + Shape of the output grid. + """ + if self.output_grid_type == "regular_ll": + earthkit_output = [self.degree, self.degree] + grid_shape = [int(180 // self.degree + 1), int(360 // self.degree)] + return earthkit_output, grid_shape + elif self.output_grid_type in ["N", "O"]: + earthkit_output = self.output_grid_type + str(int(self.degree)) + grid_shape = self.find_num_cells() + return earthkit_output, grid_shape + else: + raise ValueError(f"Unsupported output grid type: {self.output_grid_type}") + # TODO add other grid types if needed + + def gaussian_regular_da(self, data: xr.DataArray) -> xr.DataArray: + """ + Regrid a single xarray Dataset to regular lat/lon grid. + Requires a change in number of dimensions (not just size), so handled separately. + + Parameters + ---------- + data : Input xarray DataArray containing the inference data on native grid. + Returns + ------- + Regridded xarray DataArray. 
+ """ + + # set coords + new_coords = data.coords.copy() + new_coords.update( + { + "valid_time": data["valid_time"].values, + "latitude": np.linspace(-90, 90, self.grid_shape[0]), + "longitude": np.linspace(0, 360 - self.degree, self.grid_shape[1]), + } + ) + new_coords._drop_coords(["ncells"]) + + # set attrs + attrs = data.attrs.copy() + with contextlib.suppress(KeyError): + del attrs["ncells"] + + # find new dims and loop through extra dimensions + original_shape = data.shape + new_shape = list(original_shape) + pos = data.dims.index("ncells") + new_shape[pos : pos + 1] = [self.grid_shape[0], self.grid_shape[1]] + new_shape = tuple(new_shape) + + original_index = [list(range(original_shape_i)) for original_shape_i in original_shape] + original_index[pos] = [slice(None)] # :placeholder + + regridded_values = np.empty(new_shape) + result = product(*original_index) + for item in result: + original_data_slice = data.values[item] + regridded_slice = interpolate( + original_data_slice, {"grid": self.earthkit_input}, {"grid": self.earthkit_output} + ) + # sSet in regridded_values + new_index = list(item) + new_index[pos : pos + 1] = [slice(None), slice(None)] + regridded_values[tuple(new_index)] = regridded_slice + + dims = list(data.dims) + pos = dims.index("ncells") + dims[pos : pos + 1] = ["latitude", "longitude"] + dims = tuple(dims) + + regrid_data = xr.DataArray( + data=regridded_values, dims=dims, coords=new_coords, attrs=attrs, name=data.name + ) + + return regrid_data + + def regular_gaussian_da(self, data: xr.DataArray) -> xr.DataArray: + """ + Regrid a single xarray Dataset to Gaussian grid. + Requires a change in number of dimensions (not just size), so handled separately. + + Parameters + ---------- + data : Input xarray DataArray containing the inference data on native grid. + Returns + ------- + Regridded xarray DataArray. + """ + raise NotImplementedError( + "Regridding from regular lat/lon grids to Gaussian grids is not implemented yet." 
+    def gaussian_regular_da(self, data: xr.DataArray) -> xr.DataArray:
+        """
+        Regrid a single xarray DataArray to regular lat/lon grid.
+        Requires a change in number of dimensions (not just size), so handled separately.
+
+        Parameters
+        ----------
+        data : Input xarray DataArray containing the inference data on native grid.
+        Returns
+        -------
+        Regridded xarray DataArray.
+        """
+
+        # set coords: keep only coordinates that do not live on the ncells dimension,
+        # then add the new regular lat/lon coordinates
+        new_coords = {
+            name: coord for name, coord in data.coords.items() if "ncells" not in coord.dims
+        }
+        new_coords.update(
+            {
+                "valid_time": data["valid_time"].values,
+                "latitude": np.linspace(-90, 90, self.grid_shape[0]),
+                "longitude": np.linspace(0, 360 - self.degree, self.grid_shape[1]),
+            }
+        )
+
+        # set attrs
+        attrs = data.attrs.copy()
+        with contextlib.suppress(KeyError):
+            del attrs["ncells"]
+
+        # find new dims and loop through extra dimensions
+        original_shape = data.shape
+        new_shape = list(original_shape)
+        pos = data.dims.index("ncells")
+        new_shape[pos : pos + 1] = [self.grid_shape[0], self.grid_shape[1]]
+        new_shape = tuple(new_shape)
+
+        original_index = [list(range(original_shape_i)) for original_shape_i in original_shape]
+        original_index[pos] = [slice(None)]  # placeholder: take the full ncells axis
+
+        regridded_values = np.empty(new_shape)
+        result = product(*original_index)
+        for item in result:
+            original_data_slice = data.values[item]
+            regridded_slice = interpolate(
+                original_data_slice, {"grid": self.earthkit_input}, {"grid": self.earthkit_output}
+            )
+            # set the regridded 2-D field in regridded_values
+            new_index = list(item)
+            new_index[pos : pos + 1] = [slice(None), slice(None)]
+            regridded_values[tuple(new_index)] = regridded_slice
+
+        dims = list(data.dims)
+        pos = dims.index("ncells")
+        dims[pos : pos + 1] = ["latitude", "longitude"]
+        dims = tuple(dims)
+
+        regrid_data = xr.DataArray(
+            data=regridded_values, dims=dims, coords=new_coords, attrs=attrs, name=data.name
+        )
+
+        return regrid_data
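The per-slice loop above follows a generic pattern: iterate over every index combination of the non-spatial dimensions with `itertools.product` while keeping the spatial axis whole. A self-contained sketch with illustrative shapes:

```python
# Sketch of the indexing pattern used in gaussian_regular_da. Shapes are
# illustrative only; the spatial axis stays whole via slice(None).
from itertools import product

import numpy as np

data = np.random.rand(2, 3, 10)    # e.g. (time, level, ncells); spatial axis at position 2
pos = 2
index_lists = [list(range(s)) for s in data.shape]
index_lists[pos] = [slice(None)]   # placeholder: take the full spatial axis
for item in product(*index_lists):
    spatial_slice = data[item]     # one 1-D field per (time, level) combination
    assert spatial_slice.shape == (10,)
```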
+ """ + if self.output_grid_type[0] == "N": + n_lats = int(re.findall(r"\d+", self.earthkit_input)[0]) + num_cells = 4 * n_lats**2 + return num_cells + elif self.output_grid_type[0] == "O": + n_lats = int(re.findall(r"\d+", self.earthkit_input)[0]) + num_cells = 2 * n_lats * (n_lats + 1) + return num_cells + else: + raise ValueError("Input grid type is not Gaussian, cannot find number of cells.") + + def gaussian_gaussian_da(self, data: xr.DataArray) -> xr.DataArray: + """ + Regrid a single xarray Dataset to Gaussian grid. + Parameters + ---------- + data : Input xarray DataArray containing the inference data on native grid. + Returns + ------- + Regridded xarray DataArray. + """ + _logger.warning("Regridding between different Gaussian grids has not been tested.") + # set coords + new_coords = data.coords.copy() + new_coords.update( + { + "ncells": np.arange(self.grid_shape), + # "valid_time": data["valid_time"].values, + } + ) + # set attrs + attrs = data.attrs.copy() + + # find ncells position + original_shape = data.shape + new_shape = list(original_shape) + pos = data.dims.index("ncells") + new_shape[pos] = self.grid_shape + new_shape = tuple(new_shape) + # find indices + original_index = [list(range(original_shape_i)) for original_shape_i in original_shape] + original_index[pos] = [slice(None)] # :placeholder + + regridded_values = np.empty(new_shape) + result = product(*original_index) + for item in result: + original_data_slice = data.values[item] + regridded_slice = interpolate( + original_data_slice, {"grid": self.earthkit_input}, {"grid": self.earthkit_output} + ) + # sSet in regridded_values + new_index = list(item) + new_index[pos] = slice(None) + regridded_values[tuple(new_index)] = regridded_slice + + regrid_data = xr.DataArray( + data=regridded_values, dims=data.dims, coords=new_coords, attrs=attrs, name=data.name + ) + + return regrid_data + + def prepare_data( + self, + ) -> None: + """ + Prepare data for regridding. + """ + if self.input_grid_type == "gaussian": + ds = self.dataset + # reorder everything except ncells + original_ncells = ds["ncells"] + ds = ds.isel(ncells=self.indices) + ds["ncells"] = original_ncells + self.dataset = ds + else: + pass + + def add_attrs(self, regrid_ds: xr.Dataset) -> xr.Dataset: + """ + Preserve original coordinates after regridding. + Parameters + ---------- + regrid_ds : xr.Dataset + Regridded xarray Dataset. + Returns + ------- + regrid_ds : xr.Dataset + xarray Dataset with coordinates. + """ + ds = self.dataset + + if self.input_grid_type == "gaussian" and self.output_grid_type == "regular_ll": + for coord in ds.coords: + if coord not in ["latitude", "longitude"]: + if "ncells" not in ds[coord].dims: + regrid_ds.coords[coord] = ds[coord] + else: + # preserve CF attributes + regrid_ds.coords[coord].attrs = ds[coord].attrs + if self.input_grid_type == "regular_ll" and self.output_grid_type == "gaussian": + raise NotImplementedError( + "Preserving coordinates when regridding from regular lat/lon grids " + "to Gaussian grids is not implemented yet." + ) + + # keep global attrs + regrid_ds.attrs = ds.attrs + # change grid_type + regrid_ds.attrs["grid_type"] = self.output_grid_type + regrid_ds.attrs["history"] += ( + f" and regridded from {self.earthkit_input} to {self.earthkit_output} using earthkit" + ) + + return regrid_ds + + def regrid_ds( + self, + ) -> xr.Dataset: + """ + Regrids an xarray Dataset from native grid to chosen grid. + Returns + ------- + Regridded xarray Dataset. 
+ """ + self.input_grid_type = self.detect_input_grid_type() + self.earthkit_input = self.define_earthkit_input() + self.earthkit_output, self.grid_shape = self.define_earthkit_output() + _logger.info(f"Attempting to regrid from {self.earthkit_input} to {self.earthkit_output}") + # No regridding needed if both input and output are same degree + if self.input_grid_type == self.output_grid_type: + if self.earthkit_input == self.earthkit_output: + _logger.info("Input and output grid types are the same, skipping regridding step.") + return self.dataset + self.prepare_data() + + ds = self.dataset + + regrid_vars = {} + for var in ds.data_vars: + regrid_vars[var] = self.regrid_da(ds[var]) + regrid_ds = xr.Dataset(regrid_vars) + regrid_ds = self.add_attrs(regrid_ds) + + return regrid_ds + + def regrid_da(self, da: xr.DataArray) -> xr.DataArray: + """ + Regrid a single xarray DataArray from input grid to output grid. + + Parameters + ---------- + da : Input xarray DataArray containing the inference data on native grid. + Returns + Regridded xarray DataArray. + ------- + """ + if self.input_grid_type == "gaussian" and self.output_grid_type == "regular_ll": + regrid_da = self.gaussian_regular_da(da) + elif self.input_grid_type == "regular_ll" and self.output_grid_type == "gaussian": + regrid_da = self.regular_gaussian_da(da) + elif self.input_grid_type == self.output_grid_type: + regrid_da = self.same_grid_da(da) + else: + raise NotImplementedError( + f"""Regridding from {self.earthkit_input} to {self.earthkit_output} grid + is not implemented yet.""" + ) + return regrid_da diff --git a/packages/evaluate/src/weathergen/evaluate/io/csv_reader.py b/packages/evaluate/src/weathergen/evaluate/io/csv_reader.py new file mode 100644 index 000000000..fbd659242 --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/io/csv_reader.py @@ -0,0 +1,162 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +# Standard library +import logging +import re +from pathlib import Path + +# Third-party +import numpy as np +import pandas as pd +import xarray as xr + +# Local application / package +from weathergen.evaluate.io.io_reader import Reader + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +class CsvReader(Reader): + """ + Reader class to read evaluation data from CSV files and convert to xarray DataArray. + """ + + def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict | None = None): + """ + Initialize the CsvReader. + + Parameters + ---------- + eval_cfg : + config with plotting and evaluation options for that run id + run_id : str + run id of the model + private_paths: + list of private paths for the supported HPC + """ + + super().__init__(eval_cfg, run_id, private_paths) + self.csv_path = eval_cfg.get("csv_path") + assert self.csv_path is not None, "CSV path must be provided in the config." 
diff --git a/packages/evaluate/src/weathergen/evaluate/io/csv_reader.py b/packages/evaluate/src/weathergen/evaluate/io/csv_reader.py
new file mode 100644
index 000000000..fbd659242
--- /dev/null
+++ b/packages/evaluate/src/weathergen/evaluate/io/csv_reader.py
@@ -0,0 +1,162 @@
+# (C) Copyright 2025 WeatherGenerator contributors.
+#
+# This software is licensed under the terms of the Apache Licence Version 2.0
+# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
+#
+# In applying this licence, ECMWF does not waive the privileges and immunities
+# granted to it by virtue of its status as an intergovernmental organisation
+# nor does it submit to any jurisdiction.
+
+# Standard library
+import logging
+import re
+from pathlib import Path
+
+# Third-party
+import numpy as np
+import pandas as pd
+import xarray as xr
+
+# Local application / package
+from weathergen.evaluate.io.io_reader import Reader
+
+_logger = logging.getLogger(__name__)
+_logger.setLevel(logging.INFO)
+
+
+class CsvReader(Reader):
+    """
+    Reader class to read evaluation data from CSV files and convert to xarray DataArray.
+    """
+
+    def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict | None = None):
+        """
+        Initialize the CsvReader.
+
+        Parameters
+        ----------
+        eval_cfg :
+            config with plotting and evaluation options for that run id
+        run_id : str
+            run id of the model
+        private_paths:
+            dictionary of private paths for the supported HPC
+        """
+
+        super().__init__(eval_cfg, run_id, private_paths)
+        self.csv_path = eval_cfg.get("csv_path")
+        assert self.csv_path is not None, "CSV path must be provided in the config."
+
+        pd_data = pd.read_csv(self.csv_path, index_col=0)
+
+        self.data = _rename_channels(pd_data)
+        self.metrics_base_dir = Path(self.csv_path).parent
+        # for backward compatibility allow metric_dir to be specified in the run config
+        self.metrics_dir = Path(
+            self.eval_cfg.get("metrics_dir", self.metrics_base_dir / self.run_id / "evaluation")
+        )
+
+        assert len(eval_cfg.streams.keys()) == 1, "CsvReader only supports one stream."
+        self.stream = list(eval_cfg.streams.keys())[0]
+        self.channels = self.data.index.tolist()
+        self.samples = [0]
+        # column labels are expected to start with the forecast step, e.g. "24 h"
+        self.forecast_steps = [int(col.split()[0]) for col in self.data.columns]
+        self.npoints_per_sample = [0]
+        self.epoch = eval_cfg.get("epoch", 0)
+        self.metric = eval_cfg.get("metric")
+        self.region = eval_cfg.get("region")
+
+    def get_samples(self) -> set[int]:
+        """get set of samples for the retrieved scores (initialisation times)"""
+        return set(self.samples)  # Placeholder implementation
+
+    def get_forecast_steps(self) -> set[int]:
+        """get set of forecast steps"""
+        return set(self.forecast_steps)  # Placeholder implementation
+
+    # TODO: get this from config
+    def get_channels(self, stream: str | None = None) -> list[str]:
+        """get set of channels"""
+        assert stream == self.stream, "streams do not match in CsvReader."
+        return list(self.channels)  # Placeholder implementation
+
+    def get_values(self) -> np.ndarray:
+        """get score values in the right format"""
+        # (channel, fstep) -> (sample=1, fstep, channel, metric=1)
+        return self.data.values[np.newaxis, :, :, np.newaxis].T
+
+    def load_scores(self, stream: str, region: str, metric: str) -> xr.DataArray:
+        """
+        Load the existing scores for a given run, stream and metric.
+
+        Parameters
+        ----------
+        stream :
+            Stream name.
+        region :
+            Region name.
+        metric :
+            Metric name.
+
+        Returns
+        -------
+        The metric DataArray.
+        """
+
+        available_data = self.check_availability(stream, mode="evaluation")
+
+        # fill it only for matching metric
+        if metric == self.metric and region == self.region and stream == self.stream:
+            data = self.get_values()
+        else:
+            data = np.full(
+                (
+                    len(available_data.samples),
+                    len(available_data.fsteps),
+                    len(available_data.channels),
+                    1,
+                ),
+                np.nan,
+            )
+
+        da = xr.DataArray(
+            data.astype(np.float32),
+            dims=("sample", "forecast_step", "channel", "metric"),
+            coords={
+                "sample": available_data.samples,
+                "forecast_step": available_data.fsteps,
+                "channel": available_data.channels,
+                "metric": [metric],
+            },
+            attrs={"npoints_per_sample": self.npoints_per_sample},
+        )
+
+        return da
+
+
+##### Helper function for CsvReader ####
+def _rename_channels(data) -> pd.DataFrame:
+    """
+    The scores downloaded from Quaver have a different convention. Need renaming.
+    Rename channel names to include underscore between letters and digits.
+    E.g., 'z500' -> 'z_500', 't850' -> 't_850', '2t' -> '2t', '10ff' -> '10ff'
+
+    Parameters
+    ----------
+    data :
+        Scores table indexed by channel name.
+
+    Returns
+    -------
+    Dataset with renamed channel names.
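+
+    Examples
+    --------
+    >>> df = pd.DataFrame(0.0, index=["z500", "t850", "2t"], columns=["24 h"])
+    >>> list(_rename_channels(df).index)
+    ['z_500', 't_850', '2t']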
+ """ + for name in list(data.index): + # If it starts with digits (surface vars like 2t, 10ff) → leave unchanged + if re.match(r"^\d", name): + continue + + # Otherwise, insert underscore between letters and digits + data = data.rename(index={name: re.sub(r"([a-zA-Z])(\d+)", r"\1_\2", name)}) + + return data diff --git a/packages/evaluate/src/weathergen/evaluate/io/io_reader.py b/packages/evaluate/src/weathergen/evaluate/io/io_reader.py new file mode 100644 index 000000000..2dd12c27a --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/io/io_reader.py @@ -0,0 +1,314 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +# Standard library +import logging +import re +from dataclasses import dataclass + +# Third-party +import xarray as xr + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +@dataclass +class ReaderOutput: + """ + Dataclass to hold the output of the Reader.get_data method. + Attributes + ---------- + target : dict[str, xr.Dataset] + Dictionary of xarray Datasets for targets, indexed by forecast step. + prediction : dict[str, xr.Dataset] + Dictionary of xarray Datasets for predictions, indexed by forecast step. + points_per_sample : xr.DataArray | None + xarray DataArray containing the number of points per sample, if `return_counts` is True + """ + + target: dict[str, xr.Dataset] + prediction: dict[str, xr.Dataset] + points_per_sample: xr.DataArray | None + + +@dataclass +class DataAvailability: + """ + Dataclass to hold information about data availability in the input files. + Attributes + ---------- + score_availability: bool + True if the metric file contains the requested combination. + channels: + List of channels requested + fsteps: + List of forecast steps requested + samples: + List of samples requested + """ + + score_availability: bool + channels: list[str] | None + fsteps: list[int] | None + samples: list[int] | None + ensemble: list[str] | None = None + + +class Reader: + def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict[str, str] | None = None): + """ + Generic data reader class. 
+
+        Parameters
+        ----------
+        eval_cfg :
+            config with plotting and evaluation options for that run id
+        run_id :
+            run id of the model
+        private_paths:
+            dictionary of private paths for the supported HPC
+        """
+        self.eval_cfg = eval_cfg
+        self.run_id = run_id
+        self.private_paths = private_paths
+        self.streams = eval_cfg.streams.keys()
+        # TODO: propagate it to the other functions using global plotting opts
+        self.global_plotting_options = eval_cfg.get("global_plotting_options", {})
+
+        # If results_base_dir and model_base_dir are not provided, default paths are used
+        self.model_base_dir = self.eval_cfg.get("model_base_dir", None)
+
+        self.results_base_dir = self.eval_cfg.get(
+            "results_base_dir", None
+        )  # base directory where results will be stored
+
+    def get_stream(self, stream: str):
+        """
+        returns the dictionary associated with a particular stream
+
+        Parameters
+        ----------
+        stream: str
+            the stream name
+
+        Returns
+        -------
+        dict
+            the config dictionary associated with that stream
+        """
+        return self.eval_cfg.streams.get(stream, {})
+
+    def get_samples(self) -> set[int]:
+        """Placeholder implementation of sample getter. Override in subclass."""
+        return set()
+
+    def get_forecast_steps(self) -> set[int]:
+        """Placeholder implementation of forecast step getter. Override in subclass."""
+        return set()
+
+    # TODO: get this from config
+    def get_channels(self, stream: str | None = None) -> list[str]:
+        """Placeholder implementation of channel names getter. Override in subclass."""
+        return []
+
+    def get_ensemble(self, stream: str | None = None) -> list[str]:
+        """Placeholder implementation of ensemble member names getter. Override in subclass."""
+        return []
+
+    def is_regular(self, stream: str) -> bool:
+        """
+        Placeholder implementation to check if lat/lon are regularly spaced.
+        Override in subclass.
+        """
+        return True
+
+    def load_scores(self, stream: str, region: str, metric: str) -> xr.DataArray | None:
+        """Placeholder to load pre-computed scores for a given run, stream, metric"""
+        return None
+
+    def check_availability(
+        self,
+        stream: str,
+        available_data: dict | None = None,
+        mode: str = "",
+    ) -> DataAvailability:
+        """
+        Check if requested channels, forecast steps and samples are
+        i) available in the previously saved metric file if specified (return False otherwise)
+        ii) available in the source file (e.g. the Zarr file, return error otherwise)
+        Additionally, if channels, forecast steps or samples is None/'all', it will
+        i) set the variable to all available vars in source file
+        ii) return True only if the respective variable contains the same indices in metric file
+        and source file (return False otherwise)
+
+        Parameters
+        ----------
+        stream :
+            The stream considered.
+        available_data :
+            The available data loaded from metric file.
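+        mode :
+            Either "plotting" or "evaluation"; selects which section of the
+            stream config is validated and labels the log messages.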
+ Returns + ------- + DataAvailability + A dataclass containing: + - channels: list of channels or None if 'all' + - fsteps: list of forecast steps or None if 'all' + - samples: list of samples or None if 'all' + """ + + # fill info for requested channels, fsteps, samples + requested_data = self._get_channels_fsteps_samples(stream, mode) + + channels = requested_data.channels + fsteps = requested_data.fsteps + samples = requested_data.samples + ensemble = requested_data.ensemble + requested = { + "channel": set(channels) if channels is not None else None, + "fstep": set(fsteps) if fsteps is not None else None, + "sample": set(samples) if samples is not None else None, + "ensemble": set(ensemble) if ensemble is not None else None, + } + + # fill info from available metric file (if provided) + available = { + "channel": ( + set(available_data["channel"].values.ravel()) + if available_data is not None + else set() + ), + "fstep": ( + set(available_data["forecast_step"].values.ravel()) + if available_data is not None + else set() + ), + "sample": ( + set(available_data.coords["sample"].values.ravel()) + if available_data is not None + else set() + ), + "ensemble": ( + set(available_data["ens"].values.ravel()) + if available_data is not None and "ens" in available_data.coords + else set() + ), + } + + # fill info from reader + reader_data = { + "fstep": set(int(f) for f in self.get_forecast_steps()), + "sample": set(int(s) for s in self.get_samples()), + "channel": set(self.get_channels(stream)), + "ensemble": set(self.get_ensemble(stream)), + } + + check_score = True + corrected = False + for name in ["channel", "fstep", "sample", "ensemble"]: + if requested[name] is None: + # Default to all in Zarr + requested[name] = reader_data[name] + # If file with metrics exists, must exactly match + if available_data is not None and reader_data[name] != available[name]: + _logger.info( + f"Requested all {name}s for {mode}, but previous config was a " + "strict subset. Recomputing." + ) + check_score = False + + # Must be subset of Zarr + if not requested[name] <= reader_data[name]: + missing = requested[name] - reader_data[name] + + if name == "ensemble" and "mean" in missing: + missing.remove("mean") + if missing: + _logger.info( + f"Requested {name}(s) {missing} do(es) not exist in Zarr. " + f"Removing missing {name}(s) for {mode}." + ) + requested[name] = requested[name] & reader_data[name] + corrected = True + + # Must be a subset of available_data (if provided) + if available_data is not None and not requested[name] <= available[name]: + missing = requested[name] - available[name] + _logger.info( + f"{name.capitalize()}(s) {missing} missing in previous evaluation. Recomputing." + ) + check_score = False + + if check_score and not corrected: + scope = "metric file" if available_data is not None else "Zarr file" + _logger.info( + f"All checks passed – All channels, samples, fsteps requested for {mode} are " + f"present in {scope}..." + ) + + return DataAvailability( + score_availability=check_score, + channels=sorted(list(requested["channel"])), + fsteps=sorted(list(requested["fstep"])), + samples=sorted(list(requested["sample"])), + ensemble=sorted(list(requested["ensemble"])), + ) + + def _get_channels_fsteps_samples(self, stream: str, mode: str) -> DataAvailability: + """ + Get channels, fsteps and samples for a given run and stream from the config. + Replace 'all' with None. + + Parameters + ---------- + stream: str + The stream considered. 
+ mode: str + if plotting or evaluation mode + + Returns + ------- + DataAvailability + A dataclass containing: + - channels: list of channels or None if 'all' + - fsteps: list of forecast steps or None if 'all' + - samples: list of samples or None if 'all' + """ + assert mode == "plotting" or mode == "evaluation", ( + "get_channels_fsteps_samples:: Mode should be either 'plotting' or 'evaluation'" + ) + + stream_cfg = self.get_stream(stream) + assert stream_cfg.get(mode, False), "Mode does not exist in stream config. Please add it." + + samples = stream_cfg[mode].get("sample", None) + fsteps = stream_cfg[mode].get("forecast_step", None) + channels = stream_cfg.get("channels", None) + ensemble = stream_cfg[mode].get("ensemble", None) + if ensemble == "mean": + ensemble = ["mean"] + + if isinstance(fsteps, str) and fsteps != "all": + assert re.match(r"^\d+-\d+$", fsteps), ( + "String format for forecast_step in config must be 'digit-digit' or 'all'" + ) + fsteps = list(range(int(fsteps.split("-")[0]), int(fsteps.split("-")[1]) + 1)) + if isinstance(samples, str) and samples != "all": + assert re.match(r"^\d+-\d+$", samples), ( + "String format for sample in config must be 'digit-digit' or 'all'" + ) + samples = list(range(int(samples.split("-")[0]), int(samples.split("-")[1]) + 1)) + + return DataAvailability( + score_availability=True, + channels=None if (channels == "all" or channels is None) else list(channels), + fsteps=None if (fsteps == "all" or fsteps is None) else list(fsteps), + samples=None if (samples == "all" or samples is None) else list(samples), + ensemble=None if (ensemble == "all" or ensemble is None) else list(ensemble), + ) diff --git a/packages/evaluate/src/weathergen/evaluate/io_reader.py b/packages/evaluate/src/weathergen/evaluate/io/wegen_reader.py similarity index 51% rename from packages/evaluate/src/weathergen/evaluate/io_reader.py rename to packages/evaluate/src/weathergen/evaluate/io/wegen_reader.py index e6f409da6..6425d3837 100644 --- a/packages/evaluate/src/weathergen/evaluate/io_reader.py +++ b/packages/evaluate/src/weathergen/evaluate/io/wegen_reader.py @@ -7,462 +7,32 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. +# Standard library import json import logging -import re -from dataclasses import dataclass from pathlib import Path +# Third-party import numpy as np import omegaconf as oc -import pandas as pd import xarray as xr from tqdm import tqdm -from weathergen.common.config import get_shared_wg_path, load_config, load_model_config +# Local application / package +from weathergen.common.config import ( + get_shared_wg_path, + load_config, + load_model_config, +) from weathergen.common.io import ZarrIO -from weathergen.evaluate.derived_channels import DeriveChannels -from weathergen.evaluate.score_utils import RegionBoundingBox, to_list +from weathergen.evaluate.io.io_reader import Reader, ReaderOutput +from weathergen.evaluate.scores.score_utils import to_list +from weathergen.evaluate.utils.derived_channels import DeriveChannels _logger = logging.getLogger(__name__) _logger.setLevel(logging.INFO) -@dataclass -class ReaderOutput: - """ - Dataclass to hold the output of the Reader.get_data method. - Attributes - ---------- - target : dict[str, xr.Dataset] - Dictionary of xarray Datasets for targets, indexed by forecast step. - prediction : dict[str, xr.Dataset] - Dictionary of xarray Datasets for predictions, indexed by forecast step. 
- points_per_sample : xr.DataArray | None - xarray DataArray containing the number of points per sample, if `return_counts` is True - """ - - target: dict[str, xr.Dataset] - prediction: dict[str, xr.Dataset] - points_per_sample: xr.DataArray | None - - -@dataclass -class DataAvailability: - """ - Dataclass to hold information about data availability in the input files. - Attributes - ---------- - score_availability: bool - True if the metric file contains the requested combination. - channels: list[str] - List of channels requested - fsteps: list[int] - List of forecast steps requested - samples: list[int] - List of samples requested - """ - - score_availability: bool - channels: list[str] | None - fsteps: list[int] | None - samples: list[int] | None - ensemble: list[str] | None = None - - -class Reader: - def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict[str, str] | None = None): - """ - Generic data reader class. - - Parameters - ---------- - eval_cfg : dir - config with plotting and evaluation options for that run id - run_id : str - run id of the model - private_paths: dict[srt, str] - dictionary of private paths for the supported HPC - """ - self.eval_cfg = eval_cfg - self.run_id = run_id - self.private_paths = private_paths - self.streams = eval_cfg.streams.keys() - # TODO: propagate it to the other functions using global plotting opts - self.global_plotting_options = eval_cfg.get("global_plotting_options", {}) - - # If results_base_dir and model_base_dir are not provided, default paths are used - self.model_base_dir = self.eval_cfg.get("model_base_dir", None) - - self.results_base_dir = self.eval_cfg.get( - "results_base_dir", None - ) # base directory where results will be stored - - def get_stream(self, stream: str): - """ - returns the dictionary associated to a particular stream - - Parameters - ---------- - stream: str - the stream name - - Returns - ------- - dict - the config dictionary associated to that stream - """ - return self.eval_cfg.streams.get(stream, {}) - - def get_samples(self) -> set[int]: - """Placeholder implementation of sample getter. Override in subclass.""" - return set() - - def get_forecast_steps(self) -> set[int]: - """Placeholder implementation forecast step getter. Override in subclass.""" - return set() - - # TODO: get this from config - def get_channels(self, stream: str | None = None) -> list[str]: - """Placeholder implementation channel names getter. Override in subclass.""" - return list() - - def get_ensemble(self, stream: str | None = None) -> list[str]: - """Placeholder implementation ensemble member names getter. Override in subclass.""" - return list() - - def is_regular(self, stream: str) -> bool: - """ - Placeholder implementation to check if lat/lon are regularly spaced. - Override in subclass. - """ - return True - - def load_scores(self, stream: str, region: str, metric: str) -> xr.DataArray: - """Placeholder to load pre-computed scores for a given run, stream, metric""" - return None - - def check_availability( - self, - stream: str, - available_data: dict | None = None, - mode: str = "", - ) -> DataAvailability: - """ - Check if requested channels, forecast steps and samples are - i) available in the previously saved metric file if specified (return False otherwise) - ii) available in the source file (e.g. 
the Zarr file, return error otherwise) - Additionally, if channels, forecast steps or samples is None/'all', it will - i) set the variable to all available vars in source file - ii) return True only if the respective variable contains the same indeces in metric file - and source file (return False otherwise) - - Parameters - ---------- - stream : str - The stream considered. - available_data : dict, optional - The available data loaded from metric file. - Returns - ------- - DataAvailability - A dataclass containing: - - channels: list of channels or None if 'all' - - fsteps: list of forecast steps or None if 'all' - - samples: list of samples or None if 'all' - """ - - # fill info for requested channels, fsteps, samples - requested_data = self._get_channels_fsteps_samples(stream, mode) - - channels = requested_data.channels - fsteps = requested_data.fsteps - samples = requested_data.samples - ensemble = requested_data.ensemble - requested = { - "channel": set(channels) if channels is not None else None, - "fstep": set(fsteps) if fsteps is not None else None, - "sample": set(samples) if samples is not None else None, - "ensemble": set(ensemble) if ensemble is not None else None, - } - - # fill info from available metric file (if provided) - available = { - "channel": ( - set(available_data["channel"].values.ravel()) - if available_data is not None - else set() - ), - "fstep": ( - set(available_data["forecast_step"].values.ravel()) - if available_data is not None - else set() - ), - "sample": ( - set(available_data.coords["sample"].values.ravel()) - if available_data is not None - else set() - ), - "ensemble": ( - set(available_data["ens"].values.ravel()) - if available_data is not None and "ens" in available_data.coords - else set() - ), - } - - # fill info from reader - reader_data = { - "fstep": set(int(f) for f in self.get_forecast_steps()), - "sample": set(int(s) for s in self.get_samples()), - "channel": set(self.get_channels(stream)), - "ensemble": set(self.get_ensemble(stream)), - } - - check_score = True - corrected = False - for name in ["channel", "fstep", "sample", "ensemble"]: - if requested[name] is None: - # Default to all in Zarr - requested[name] = reader_data[name] - # If file with metrics exists, must exactly match - if available_data is not None and reader_data[name] != available[name]: - _logger.info( - f"Requested all {name}s for {mode}, but previous config was a " - "strict subset. Recomputing." - ) - check_score = False - - # Must be subset of Zarr - if not requested[name] <= reader_data[name]: - missing = requested[name] - reader_data[name] - - if name == "ensemble" and "mean" in missing: - missing.remove("mean") - if missing: - _logger.info( - f"Requested {name}(s) {missing} do(es) not exist in Zarr. " - f"Removing missing {name}(s) for {mode}." - ) - requested[name] = requested[name] & reader_data[name] - corrected = True - - # Must be a subset of available_data (if provided) - if available_data is not None and not requested[name] <= available[name]: - missing = requested[name] - available[name] - _logger.info( - f"{name.capitalize()}(s) {missing} missing in previous evaluation. Recomputing." - ) - check_score = False - - if check_score and not corrected: - scope = "metric file" if available_data is not None else "Zarr file" - _logger.info( - f"All checks passed – All channels, samples, fsteps requested for {mode} are " - f"present in {scope}..." 
- ) - - return DataAvailability( - score_availability=check_score, - channels=sorted(list(requested["channel"])), - fsteps=sorted(list(requested["fstep"])), - samples=sorted(list(requested["sample"])), - ensemble=sorted(list(requested["ensemble"])), - ) - - def _get_channels_fsteps_samples(self, stream: str, mode: str) -> DataAvailability: - """ - Get channels, fsteps and samples for a given run and stream from the config. - Replace 'all' with None. - - Parameters - ---------- - stream: str - The stream considered. - mode: str - if plotting or evaluation mode - - Returns - ------- - DataAvailability - A dataclass containing: - - channels: list of channels or None if 'all' - - fsteps: list of forecast steps or None if 'all' - - samples: list of samples or None if 'all' - """ - assert mode == "plotting" or mode == "evaluation", ( - "get_channels_fsteps_samples:: Mode should be either 'plotting' or 'evaluation'" - ) - - stream_cfg = self.get_stream(stream) - assert stream_cfg.get(mode, False), "Mode does not exist in stream config. Please add it." - - samples = stream_cfg[mode].get("sample", None) - fsteps = stream_cfg[mode].get("forecast_step", None) - channels = stream_cfg.get("channels", None) - ensemble = stream_cfg[mode].get("ensemble", None) - if ensemble == "mean": - ensemble = ["mean"] - - if isinstance(fsteps, str) and fsteps != "all": - assert re.match(r"^\d+-\d+$", fsteps), ( - "String format for forecast_step in config must be 'digit-digit' or 'all'" - ) - fsteps = list(range(int(fsteps.split("-")[0]), int(fsteps.split("-")[1]) + 1)) - if isinstance(samples, str) and samples != "all": - assert re.match(r"^\d+-\d+$", samples), ( - "String format for sample in config must be 'digit-digit' or 'all'" - ) - samples = list(range(int(samples.split("-")[0]), int(samples.split("-")[1]) + 1)) - - return DataAvailability( - score_availability=True, - channels=None if (channels == "all" or channels is None) else list(channels), - fsteps=None if (fsteps == "all" or fsteps is None) else list(fsteps), - samples=None if (samples == "all" or samples is None) else list(samples), - ensemble=None if (ensemble == "all" or ensemble is None) else list(ensemble), - ) - - -##### Helper function for CSVReader #### -def _rename_channels(data) -> pd.DataFrame: - """ - The scores downloaded from Quaver have a different convention. Need renaming. - Rename channel names to include underscore between letters and digits. - E.g., 'z500' -> 'z_500', 't850' -> 't_850', '2t' -> '2t', '10ff' -> '10ff' - - Parameters - ---------- - name : str - Original channel name. - - Returns - ------- - pd.DataFrame - Dataset with renamed channel names. - """ - for name in list(data.index): - # If it starts with digits (surface vars like 2t, 10ff) → leave unchanged - if re.match(r"^\d", name): - continue - - # Otherwise, insert underscore between letters and digits - data = data.rename(index={name: re.sub(r"([a-zA-Z])(\d+)", r"\1_\2", name)}) - - return data - - -class CsvReader(Reader): - """ - Reader class to read evaluation data from CSV files and convert to xarray DataArray. - """ - - def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict | None = None): - """ - Initialize the CsvReader. 
- - Parameters - ---------- - eval_cfg : dir - config with plotting and evaluation options for that run id - run_id : str - run id of the model - private_paths: lists - list of private paths for the supported HPC - """ - - super().__init__(eval_cfg, run_id, private_paths) - self.csv_path = eval_cfg.get("csv_path") - assert self.csv_path is not None, "CSV path must be provided in the config." - - pd_data = pd.read_csv(self.csv_path, index_col=0) - - self.data = _rename_channels(pd_data) - self.metrics_base_dir = Path(self.csv_path).parent - # for backward compatibility allow metric_dir to be specified in the run config - self.metrics_dir = Path( - self.eval_cfg.get("metrics_dir", self.metrics_base_dir / self.run_id / "evaluation") - ) - - assert len(eval_cfg.streams.keys()) == 1, "CsvReader only supports one stream." - self.stream = list(eval_cfg.streams.keys())[0] - self.channels = self.data.index.tolist() - self.samples = [0] - self.forecast_steps = [int(col.split()[0]) for col in self.data.columns] - self.npoints_per_sample = [0] - self.epoch = eval_cfg.get("epoch", 0) - self.metric = eval_cfg.get("metric") - self.region = eval_cfg.get("region") - - def get_samples(self) -> set[int]: - """get set of samples for the retrieved scores (initialisation times)""" - return set(self.samples) # Placeholder implementation - - def get_forecast_steps(self) -> set[int]: - """get set of forecast steps""" - return set(self.forecast_steps) # Placeholder implementation - - # TODO: get this from config - def get_channels(self, stream: str | None = None) -> list[str]: - """get set of channels""" - assert stream == self.stream, "streams do not match in CSVReader." - return list(self.channels) # Placeholder implementation - - def get_values(self) -> xr.DataArray: - """get score values in the right format""" - return self.data.values[np.newaxis, :, :, np.newaxis].T - - def load_scores(self, stream: str, region: str, metric: str) -> xr.DataArray: - """ - Load the existing scores for a given run, stream and metric. - - Parameters - ---------- - reader : - Reader object containing all info for a specific run_id - stream : - Stream name. - region : - Region name. - metric : - Metric name. - - Returns - ------- - xr.DataArray - The metric DataArray. - """ - - available_data = self.check_availability(stream, mode="evaluation") - - # fill it only for matching metric - if metric == self.metric and region == self.region and stream == self.stream: - data = self.get_values() - else: - data = np.full( - ( - len(available_data.samples), - len(available_data.fsteps), - len(available_data.channels), - 1, - ), - np.nan, - ) - - da = xr.DataArray( - data.astype(np.float32), - dims=("sample", "forecast_step", "channel", "metric"), - coords={ - "sample": available_data.samples, - "forecast_step": available_data.fsteps, - "channel": available_data.channels, - "metric": [metric], - }, - attrs={"npoints_per_sample": self.npoints_per_sample}, - ) - - return da - - class WeatherGenReader(Reader): def __init__(self, eval_cfg: dict, run_id: str, private_paths: dict | None = None): """Data reader class for WeatherGenerator model outputs stored in Zarr format.""" @@ -547,7 +117,6 @@ def get_inference_config(self): def get_data( self, stream: str, - region: str = "global", samples: list[int] | None = None, fsteps: list[str] | None = None, channels: list[str] | None = None, @@ -566,8 +135,6 @@ def get_data( Expected scheme `/`. stream : Stream name to retrieve data for. - region : - Region name to retrieve data for. 
Possible values: "global", "shem", "nhem", "tropics" samples : List of sample indices to retrieve. If None, all samples are retrieved. fsteps : @@ -587,8 +154,6 @@ def get_data( if `return_counts` is True. """ - bbox = RegionBoundingBox.from_region_name(region) - with ZarrIO(self.fname_zarr) as zio: stream_cfg = self.get_stream(stream) all_channels = self.get_channels(stream) @@ -631,15 +196,15 @@ def get_data( for sample in tqdm(samples, desc=f"Processing {self.run_id} - {stream} - {fstep}"): out = zio.get_data(sample, stream, fstep) - target, pred = out.target.as_xarray(), out.prediction.as_xarray() - if region != "global": - _logger.debug( - f"Applying bounding box mask for region '{region}' to targets " - "and predictions..." + if out.target is None or out.prediction is None: + _logger.info( + f"Skipping {stream} sample {sample} forecast step: {fstep}. " + "No data found." ) - target = bbox.apply_mask(target) - pred = bbox.apply_mask(pred) + continue + + target, pred = out.target.as_xarray(), out.prediction.as_xarray() npoints = len(target.ipoint) pps.append(npoints) @@ -689,7 +254,10 @@ def get_data( _logger.debug("Repeating sample coordinate for single-sample case.") for da in (da_tars_fs, da_preds_fs): da.assign_coords( - sample=("ipoint", np.repeat(da.sample.values, da.sizes["ipoint"])) + sample=( + "ipoint", + np.repeat(da.sample.values, da.sizes["ipoint"]), + ) ) if set(channels) != set(all_channels): @@ -725,11 +293,10 @@ def get_climatology_filename(self, stream: str) -> str | None: Get the climatology filename for a given stream from the inference configuration. Parameters ---------- - stream : str + stream : Name of the data stream. Returns ------- - str or None Climatology filename if specified, otherwise None. """ @@ -765,13 +332,12 @@ def get_stream(self, stream: str): Parameters ---------- - stream: str + stream: the stream name Returns ------- - dict - the config dictionary associated to that stream + The config dictionary associated to that stream """ stream_dict = {} with ZarrIO(self.fname_zarr) as zio: @@ -795,12 +361,11 @@ def get_channels(self, stream: str) -> list[str]: Parameters ---------- - stream : str + stream : The name of the stream to get channels for. Returns ------- - list[str] A list of channel names. """ _logger.debug(f"Getting channels for stream {stream}...") @@ -812,12 +377,11 @@ def get_ensemble(self, stream: str | None = None) -> list[str]: """Get the list of ensemble member names for a given stream from the config. Parameters ---------- - stream : str + stream : The name of the stream to get channels for. Returns ------- - list[str] A list of ensemble members. """ _logger.debug(f"Getting ensembles for stream {stream}...") @@ -832,12 +396,11 @@ def is_regular(self, stream: str) -> bool: """Check if the latitude and longitude coordinates are regularly spaced for a given stream. Parameters ---------- - stream : str + stream : The name of the stream to get channels for. Returns ------- - bool True if the stream is regularly spaced. False otherwise. """ _logger.debug(f"Checking regular spacing for stream {stream}...") @@ -885,7 +448,6 @@ def load_scores(self, stream: str, region: str, metric: str) -> xr.DataArray | N Returns ------- - xr.DataArray The metric DataArray or None if the file does not exist. """ score_path = ( @@ -907,11 +469,11 @@ def get_inference_stream_attr(self, stream_name: str, key: str, default=None): Parameters: ------------ - config: dict + config: The full configuration dictionary. 
- stream_name: str + stream_name: The name of the stream (e.g. 'ERA5'). - key: str + key: The key to look up (e.g. 'tokenize_spacetime'). default: Optional Value to return if not found (default: None). @@ -938,7 +500,6 @@ def _force_consistent_grids(ref: list[xr.DataArray]) -> xr.DataArray: Input dataset Returns ------- - xr.DataArray Returns a Dataset where all samples have the same lat lon and ipoint ordering """ diff --git a/packages/evaluate/src/weathergen/evaluate/plot_utils.py b/packages/evaluate/src/weathergen/evaluate/plotting/plot_utils.py similarity index 89% rename from packages/evaluate/src/weathergen/evaluate/plot_utils.py rename to packages/evaluate/src/weathergen/evaluate/plotting/plot_utils.py index 361ab73c4..cc5f663d6 100644 --- a/packages/evaluate/src/weathergen/evaluate/plot_utils.py +++ b/packages/evaluate/src/weathergen/evaluate/plotting/plot_utils.py @@ -143,34 +143,20 @@ def score_card_metric_region( for stream in streams_set: selected_data, run_ids = [], [] - channels_common = None - for _, data in scores_dict[metric][region].get(stream, {}).items(): - channels_per_run = [] - for ch in channels_set: - if ch not in np.atleast_1d(data.channel.values) or data.isnull().all(): - continue - else: - channels_per_run.append(ch) - - if channels_common is None: - channels_common = set(channels_per_run) - else: - channels_common = set(channels_common).intersection(set(channels_per_run)) - - if not channels_common: - continue - for run_id, data in scores_dict[metric][region].get(stream, {}).items(): - selected_data.append(data.sel(channel=list(channels_common))) + if data.isnull().all(): + continue + selected_data.append(data) run_ids.append(run_id) if selected_data and len(selected_data) > 1.0: _logger.info(f"Creating score cards for {metric} - {region} - {stream}.") name = "_".join([metric, region, stream]) - sc_plotter.plot(selected_data, run_ids, metric, channels_common, name) + sc_plotter.plot(selected_data, run_ids, metric, channels_set, name) else: _logger.info( - f"Only one run_id under stream: {stream}. Creating score card is skipped..." + f"Only one run_id for ({region}) region under stream : {stream}. " + "Creating bar plot is skipped..." 
) diff --git a/packages/evaluate/src/weathergen/evaluate/plotter.py b/packages/evaluate/src/weathergen/evaluate/plotting/plotter.py similarity index 84% rename from packages/evaluate/src/weathergen/evaluate/plotter.py rename to packages/evaluate/src/weathergen/evaluate/plotting/plotter.py index cb15e6f24..884ac059f 100644 --- a/packages/evaluate/src/weathergen/evaluate/plotter.py +++ b/packages/evaluate/src/weathergen/evaluate/plotting/plotter.py @@ -16,9 +16,10 @@ from scipy.stats import wilcoxon from weathergen.common.config import _load_private_conf -from weathergen.evaluate.plot_utils import ( +from weathergen.evaluate.plotting.plot_utils import ( DefaultMarkerSize, ) +from weathergen.evaluate.utils.regions import RegionBoundingBox work_dir = Path(_load_private_conf(None)["path_shared_working_dir"]) / "assets/cartopy" @@ -68,6 +69,7 @@ def __init__(self, plotter_cfg: dict, output_basedir: str | Path, stream: str | self.dpi_val = plotter_cfg.get("dpi_val") self.fig_size = plotter_cfg.get("fig_size") self.fps = plotter_cfg.get("fps") + self.regions = plotter_cfg.get("regions") self.plot_subtimesteps = plotter_cfg.get( "plot_subtimesteps", False ) # True if plots are created for each valid time separately @@ -82,7 +84,6 @@ def __init__(self, plotter_cfg: dict, output_basedir: str | Path, stream: str | self.sample = None self.stream = stream self.fstep = None - self.select = {} def update_data_selection(self, select: dict): @@ -352,38 +353,46 @@ def create_maps_per_sample( _logger.info(f"Creating dir {map_output_dir}") os.makedirs(map_output_dir) - plot_names = [] - for var in variables: - select_var = self.select | {"channel": var} - da = self.select_from_da(data, select_var).compute() + for region in self.regions: + if region != "global": + bbox = RegionBoundingBox.from_region_name(region) + reg_data = bbox.apply_mask(data) + else: + reg_data = data - if self.plot_subtimesteps: - ntimes_unique = len(np.unique(da.valid_time)) - _logger.info( - f"Creating maps for {ntimes_unique} valid times of variable {var} - {tag}" - ) + plot_names = [] + for var in variables: + select_var = self.select | {"channel": var} + da = self.select_from_da(reg_data, select_var).compute() - groups = da.groupby("valid_time") - else: - _logger.info(f"Creating maps for all valid times of {var} - {tag}") - groups = [(None, da)] # single dummy group + if self.plot_subtimesteps: + ntimes_unique = len(np.unique(da.valid_time)) + _logger.info( + f"Creating maps for {ntimes_unique} valid times of variable {var} - {tag}" + ) - for valid_time, da_t in groups: - if valid_time is not None: - _logger.debug(f"Plotting map for {var} at valid_time {valid_time}") - - da_t = da_t.dropna(dim="ipoint") - assert da_t.size > 0, "Data array must not be empty or contain only NAs" - - name = self.scatter_plot( - da_t, - map_output_dir, - var, - tag=tag, - map_kwargs=dict(map_kwargs.get(var, {})) | map_kwargs_global, - title=f"{self.stream}, {var} : fstep = {self.fstep:03} ({valid_time})", - ) - plot_names.append(name) + groups = da.groupby("valid_time") + else: + _logger.info(f"Creating maps for all valid times of {var} - {tag}") + groups = [(None, da)] # single dummy group + + for valid_time, da_t in groups: + if valid_time is not None: + _logger.debug(f"Plotting map for {var} at valid_time {valid_time}") + + da_t = da_t.dropna(dim="ipoint") + assert da_t.size > 0, "Data array must not be empty or contain only NAs" + + name = self.scatter_plot( + da_t, + map_output_dir, + var, + region, + tag=tag, + map_kwargs=dict(map_kwargs.get(var, 
{})) | map_kwargs_global, + title=f"{self.stream}, {var} : fstep = {self.fstep:03} ({valid_time})", + ) + plot_names.append(name) self.clean_data_selection() @@ -394,6 +403,7 @@ def scatter_plot( data: xr.DataArray, map_output_dir: Path, varname: str, + regionname: str | None, tag: str = "", map_kwargs: dict | None = None, title: str | None = None, @@ -409,6 +419,8 @@ def scatter_plot( Directory where the map will be saved varname: str Name of the variable to be plotted + regionname: str + Name of the region to be plotted tag: str Any tag you want to add to the plot map_kwargs: dict | None @@ -453,7 +465,12 @@ def scatter_plot( # Create figure and axis objects fig = plt.figure(dpi=self.dpi_val) - ax = fig.add_subplot(1, 1, 1, projection=ccrs.Robinson()) + + proj = ccrs.PlateCarree() + if regionname == "global": + proj = ccrs.Robinson() + + ax = fig.add_subplot(1, 1, 1, projection=proj) ax.coastlines() assert data["lon"].shape == data["lat"].shape == data.shape, ( @@ -476,13 +493,22 @@ def scatter_plot( plt.colorbar(scatter_plt, ax=ax, orientation="horizontal", label=f"Variable: {varname}") plt.title(title) - ax.set_global() + if regionname == "global": + ax.set_global() + else: + region_extent = [ + data["lon"].min().item(), + data["lon"].max().item(), + data["lat"].min().item(), + data["lat"].max().item(), + ] + ax.set_extent(region_extent, crs=ccrs.PlateCarree()) ax.gridlines(draw_labels=False, linestyle="--", color="black", linewidth=1) # TODO: make this nicer parts = ["map", self.run_id, tag] - if self.sample: + if self.sample is not None: parts.append(str(self.sample)) if "valid_time" in data.coords: @@ -499,6 +525,7 @@ def scatter_plot( if self.stream: parts.append(self.stream) + parts.append(regionname) parts.append(varname) if self.fstep is not None: @@ -542,42 +569,45 @@ def animation(self, samples, fsteps, variables, select, tag) -> list[str]: # Convert FPS to duration in milliseconds duration_ms = int(1000 / self.fps) if self.fps > 0 else 400 - for _, sa in enumerate(samples): - for _, var in enumerate(variables): - _logger.info(f"Creating animation for {var} sample: {sa} - {tag}") - image_paths = [] - for _, fstep in enumerate(fsteps): - # TODO: refactor to avoid code duplication with scatter_plot - parts = [ - "map", - self.run_id, - tag, - str(sa), - "*", - self.stream, - var, - "fstep", - str(fstep).zfill(3), - ] - - name = "_".join(filter(None, parts)) - fname = f"{map_output_dir.joinpath(name)}.{self.image_format}" - - names = glob.glob(fname) - image_paths += names - - if image_paths: - images = [Image.open(path) for path in image_paths] - images[0].save( - f"{map_output_dir}/animation_{self.run_id}_{tag}_{sa}_{self.stream}_{var}.gif", - save_all=True, - append_images=images[1:], - duration=duration_ms, - loop=0, - ) - - else: - _logger.warning(f"No images found for animation {var} sample {sa}") + for region in self.regions: + for _, sa in enumerate(samples): + for _, var in enumerate(variables): + _logger.info(f"Creating animation for {var} sample: {sa} - {tag}") + image_paths = [] + for _, fstep in enumerate(fsteps): + # breakpoint() + # TODO: refactor to avoid code duplication with scatter_plot + parts = [ + "map", + self.run_id, + tag, + str(sa), + "*", + self.stream, + region, + var, + "fstep", + str(fstep).zfill(3), + ] + + name = "_".join(filter(None, parts)) + fname = f"{map_output_dir.joinpath(name)}.{self.image_format}" + + names = glob.glob(fname) + image_paths += names + + if image_paths: + images = [Image.open(path) for path in image_paths] + images[0].save( 
+ f"{map_output_dir}/animation_{self.run_id}_{tag}_{sa}_{self.stream}_{region}_{var}.gif", + save_all=True, + append_images=images[1:], + duration=duration_ms, + loop=0, + ) + + else: + _logger.warning(f"No images found for animation {var} sample {sa}") return image_paths @@ -921,6 +951,7 @@ def __init__(self, plotter_cfg: dict, output_basedir: str | Path) -> None: self.dpi_val = plotter_cfg.get("dpi_val") self.improvement = plotter_cfg.get("improvement_scale", 0.2) self.out_plot_dir = Path(output_basedir) / "score_cards" + self.baseline = plotter_cfg.get("baseline") if not os.path.exists(self.out_plot_dir): _logger.info(f"Creating dir {self.out_plot_dir}") os.makedirs(self.out_plot_dir, exist_ok=True) @@ -950,15 +981,24 @@ def plot( tag: Tag to be added to the plot title and filename """ - n_runs, n_vars = len(runs), len(channels) - fig, ax = plt.subplots(figsize=(2 * n_runs, 1.2 * n_vars)) + n_runs = len(runs) + + if self.baseline and self.baseline in runs: + baseline_idx = runs.index(self.baseline) + runs = [runs[baseline_idx]] + runs[:baseline_idx] + runs[baseline_idx + 1 :] + data = [data[baseline_idx]] + data[:baseline_idx] + data[baseline_idx + 1 :] + + common_channels, n_common_channels = self.extract_common_channels(data, channels, n_runs) + + fig, ax = plt.subplots(figsize=(2 * n_runs, 1.2 * n_common_channels)) baseline = data[0] skill_models = [] - for run_index in range(1, n_runs): skill_model = 0.0 - for var_index, var in enumerate(channels): + for var_index, var in enumerate(common_channels): + if var not in data[0].channel.values or var not in data[run_index].channel.values: + continue diff, avg_diff, avg_skill = self.compare_models( data, baseline, run_index, var, metric ) @@ -974,36 +1014,37 @@ def plot( ax.scatter(x, y, marker=triangle, color=color, s=size.values, zorder=3) # Perform Wilcoxon test - stat, p = wilcoxon(diff, alternative=alt) - - # Draw rectangle border for significance - if p < 0.05: - lw = 2 if p < 0.01 else 1 - rect_color = color - rect = plt.Rectangle( - (x - 0.25, y - 0.25), - 0.5, - 0.5, - fill=False, - edgecolor=rect_color, - linewidth=lw, - zorder=2, - ) - ax.add_patch(rect) - - skill_models.append(skill_model / n_vars) + if diff["forecast_step"].item() > 1.0: + stat, p = wilcoxon(diff, alternative=alt) + + # Draw rectangle border for significance + if p < 0.05: + lw = 2 if p < 0.01 else 1 + rect_color = color + rect = plt.Rectangle( + (x - 0.25, y - 0.25), + 0.5, + 0.5, + fill=False, + edgecolor=rect_color, + linewidth=lw, + zorder=2, + ) + ax.add_patch(rect) + + skill_models.append(skill_model / n_common_channels) # Set axis labels ylabels = [ f"{var}\n({baseline.coords['metric'].item().upper()}={baseline.sel(channel=var).mean().values.squeeze():.3f})" - for var in channels + for var in common_channels ] xlabels = [ f"{model_name}\nSkill: {skill_models[i]:.3f}" for i, model_name in enumerate(runs[1::]) ] ax.set_xticks(np.arange(1, n_runs)) ax.set_xticklabels(xlabels, fontsize=10) - ax.set_yticks(np.arange(n_vars) + 0.5) + ax.set_yticks(np.arange(n_common_channels) + 0.5) ax.set_yticklabels(ylabels, fontsize=10) for label in ax.get_yticklabels(): label.set_horizontalalignment("center") @@ -1017,7 +1058,7 @@ def plot( for x in np.arange(0.5, n_runs - 1, 1): ax.axvline(x, color="gray", linestyle="--", linewidth=0.5, zorder=0, alpha=0.5) ax.set_xlim(0.5, n_runs - 0.5) - ax.set_ylim(0, n_vars) + ax.set_ylim(0, n_common_channels) legend = [ Line2D( @@ -1043,6 +1084,17 @@ def plot( ) plt.close(fig) + def extract_common_channels(self, data, channels, 
n_runs): + common_channels = [] + for run_index in range(1, n_runs): + for var in channels: + if var not in data[0].channel.values or var not in data[run_index].channel.values: + continue + common_channels.append(var) + common_channels = list(set(common_channels)) + n_vars = len(common_channels) + return common_channels, n_vars + def compare_models( self, data: list[xr.DataArray], @@ -1233,6 +1285,7 @@ def __init__(self, plotter_cfg: dict, output_basedir: str | Path) -> None: self.dpi_val = plotter_cfg.get("dpi_val") self.cmap = plotter_cfg.get("cmap", "bwr") self.out_plot_dir = Path(output_basedir) / "bar_plots" + self.baseline = plotter_cfg.get("baseline") _logger.info(f"Saving bar plots to: {self.out_plot_dir}") if not os.path.exists(self.out_plot_dir): _logger.info(f"Creating dir {self.out_plot_dir}") @@ -1273,27 +1326,43 @@ def plot( ) ax = ax.flatten() + if self.baseline and self.baseline in runs: + baseline_idx = runs.index(self.baseline) + runs = [runs[baseline_idx]] + runs[:baseline_idx] + runs[baseline_idx + 1 :] + data = [data[baseline_idx]] + data[:baseline_idx] + data[baseline_idx + 1 :] + for run_index in range(1, len(runs)): ratio_score, channels_per_comparison = self.calc_ratio_per_run_id( data, channels, run_index ) - - ax[run_index - 1].barh( - np.arange(len(ratio_score)), - ratio_score, - color=self.colors(ratio_score, metric), - align="center", - edgecolor="black", - linewidth=0.5, - ) - ax[run_index - 1].set_yticks( - np.arange(len(ratio_score)), labels=channels_per_comparison - ) - ax[run_index - 1].invert_yaxis() - ax[run_index - 1].set_xlabel( - f"Relative {data[0].coords['metric'].item().upper()}: " - f"Target Model ({runs[run_index]}) / Reference Model ({runs[0]})" - ) + if len(ratio_score) > 0: + ax[run_index - 1].barh( + np.arange(len(ratio_score)), + ratio_score, + color=self.colors(ratio_score, metric), + align="center", + edgecolor="black", + linewidth=0.5, + ) + ax[run_index - 1].set_yticks( + np.arange(len(ratio_score)), labels=channels_per_comparison + ) + ax[run_index - 1].invert_yaxis() + ax[run_index - 1].set_xlabel( + f"Relative {data[0].coords['metric'].item().upper()}: " + f"Target Model ({runs[run_index]}) / Reference Model ({runs[0]})" + ) + else: + ax[run_index - 1].set_visible(False) # or annotate as missing + # Or show a message: + ax[run_index - 1].text( + 0.5, + 0.5, + "No Data", + ha="center", + va="center", + transform=ax[run_index - 1].transAxes, + ) _logger.info(f"Saving bar plots to: {self.out_plot_dir}") parts = ["bar_plot_compare", tag] + runs @@ -1403,9 +1472,7 @@ def calculate_average_over_dim( ] if non_zero_dims: - _logger.info( - f"LinePlot:: Found multiple entries for dimensions: {non_zero_dims}. Averaging..." - ) + _logger.info(f"Found multiple entries for dimensions: {non_zero_dims}. 
Averaging...")

         baseline_score = baseline_var.mean(
             dim=[dim for dim in baseline_var.dims if dim != x_dim], skipna=True
@@ -1417,4 +1484,4 @@ def calculate_average_over_dim(

 def lower_is_better(metric: str) -> bool:
     # Determine whether lower or higher is better
-    return metric in {"l1", "l2", "mse", "rmse", "vrmse", "bias", "crps", "spread"}
+    return metric in {"l1", "l2", "mae", "mse", "rmse", "vrmse", "bias", "crps", "spread"}
diff --git a/packages/evaluate/src/weathergen/evaluate/run_evaluation.py b/packages/evaluate/src/weathergen/evaluate/run_evaluation.py
index 3bf198d07..0c0313419 100755
--- a/packages/evaluate/src/weathergen/evaluate/run_evaluation.py
+++ b/packages/evaluate/src/weathergen/evaluate/run_evaluation.py
@@ -9,26 +9,31 @@
 #   weathergen-evaluate = { path = "../../../../../packages/evaluate" }
 # ///

+# Standard library
 import argparse
 import logging
+import multiprocessing as mp
 import sys
 from collections import defaultdict
+from logging.handlers import QueueHandler, QueueListener
 from pathlib import Path

+# Third-party
 import mlflow
 from mlflow.client import MlflowClient
-from omegaconf import OmegaConf
-from xarray import DataArray
+from omegaconf import DictConfig, OmegaConf

+# Local application / package
 from weathergen.common.config import _REPO_ROOT
 from weathergen.common.platform_env import get_platform_env
-from weathergen.evaluate.io_reader import CsvReader, WeatherGenReader
-from weathergen.evaluate.plot_utils import collect_channels
-from weathergen.evaluate.utils import (
+from weathergen.evaluate.io.csv_reader import CsvReader
+from weathergen.evaluate.io.wegen_reader import WeatherGenReader
+from weathergen.evaluate.plotting.plot_utils import collect_channels
+from weathergen.evaluate.utils.utils import (
     calc_scores_per_stream,
-    metric_list_to_json,
     plot_data,
     plot_summary,
+    triple_nested_dict,
 )
 from weathergen.metrics.mlflow_utils import (
     MlFlowUpload,
@@ -37,21 +42,75 @@
     setup_mlflow,
 )

-_logger = logging.getLogger(__name__)
-
 _DEFAULT_PLOT_DIR = _REPO_ROOT / "plots"

+_logger = logging.getLogger(__name__)
 _platform_env = get_platform_env()


-def evaluate() -> None:
-    # By default, arguments from the command line are read.
-    evaluate_from_args(sys.argv[1:])
+def setup_main_logger(log_file: str | None, log_queue: mp.Queue) -> QueueListener:
+    """Set up the main-process logger with a QueueListener.
+
+    Parameters
+    ----------
+    log_file: str | None
+        Name of an optional log file; if given, records are also written there.
+    """
+
+    console_handler = logging.StreamHandler()
+    console_handler.setFormatter(
+        logging.Formatter("%(asctime)s [%(processName)s] %(levelname)s: %(message)s")
+    )
+
+    handlers: list[logging.Handler] = [console_handler]
+    if log_file:
+        file_handler = logging.FileHandler(log_file)
+        file_handler.setFormatter(
+            logging.Formatter("%(asctime)s [%(processName)s] %(levelname)s: %(message)s")
+        )
+        handlers.append(file_handler)
+
+    listener = QueueListener(log_queue, *handlers)
+    listener.start()
+    return listener


+def setup_worker_logger(log_queue: mp.Queue) -> logging.Logger:
+    """Route a worker process's log records to the shared queue."""
+    qh = QueueHandler(log_queue)
+    logger = logging.getLogger()
+    logger.setLevel(logging.INFO)
+    logger.handlers.clear()
+    logger.addHandler(qh)
+    return logger
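The two helpers above implement the standard queue-based logging pattern for multiprocessing: workers only enqueue records, and the main process drains the queue into the real handlers. A minimal, self-contained sketch of the same pattern (illustrative only; _init_worker and _square are hypothetical names, not part of this patch):

import logging
import multiprocessing as mp
from logging.handlers import QueueHandler, QueueListener

def _init_worker(q) -> None:
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(QueueHandler(q))  # workers only enqueue records

def _square(x: int) -> int:
    logging.info("processing %d", x)
    return x * x

if __name__ == "__main__":
    q = mp.Queue()
    listener = QueueListener(q, logging.StreamHandler())  # main process drains the queue
    listener.start()
    with mp.Pool(2, initializer=_init_worker, initargs=(q,)) as pool:
        print(pool.map(_square, [1, 2, 3]))
    listener.stop()

This avoids interleaved or lost log lines, since file and console handlers live in exactly one process.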
+
+
+#################################################################
+
+
+def evaluate() -> None:
+    """Entry point for the evaluation script."""
+    # By default, arguments from the command line are read.
+    log_queue: mp.Queue = mp.Queue()
+    listener = setup_main_logger("evaluation.log", log_queue)
+    try:
+        evaluate_from_args(sys.argv[1:], log_queue)
+    finally:
+        listener.stop()
+        log_queue.close()
+        log_queue.join_thread()


-def evaluate_from_args(argl: list[str]) -> None:
+def evaluate_from_args(argl: list[str], log_queue: mp.Queue) -> None:
+    """
+    Wrapper of evaluate_from_config.
+
+    Parameters
+    ----------
+    argl:
+        List of arguments passed from the terminal.
+    log_queue:
+        Queue that funnels log records from worker processes to the main listener.
+    """
     # configure logging
-    logging.basicConfig(level=logging.INFO)
     parser = argparse.ArgumentParser(description="Fast evaluation of WeatherGenerator runs.")
     parser.add_argument(
         "--config",
@@ -80,123 +139,175 @@ def evaluate_from_args(argl: list[str]) -> None:
     assert hpc_conf is not None
     private_home = Path(hpc_conf)
     private_cf = OmegaConf.load(private_home)
+    assert isinstance(private_cf, DictConfig)
     mlflow_client = setup_mlflow(private_cf)
     _logger.info(f"MLFlow client set up: {mlflow_client}")

-    evaluate_from_config(OmegaConf.load(config), mlflow_client)
+    cf = OmegaConf.load(config)
+    assert isinstance(cf, DictConfig)
+    evaluate_from_config(cf, mlflow_client, log_queue)
+
+
+def _process_stream_wrapper(
+    args: dict[str, object],
+) -> tuple[str, str, dict[str, dict[str, dict[str, float]]]]:
+    return _process_stream(**args)
+
+
+def _process_stream(
+    run_id: str,
+    run: dict,
+    stream: str,
+    private_paths: dict[str, str],
+    global_plotting_opts: dict[str, object],
+    regions: list[str],
+    metrics: list[str],
+    plot_score_maps: bool,
+) -> tuple[str, str, dict[str, dict[str, dict[str, float]]]]:
+    """
+    Worker function for a single stream of a single run.
+    Returns a dictionary with the scores instead of modifying a shared dict.
+
+    Parameters
+    ----------
+    run_id:
+        Run identification string.
+    run:
+        Configuration dictionary for the given run.
+    stream:
+        Name of the stream to be processed.
+    private_paths:
+        Private paths used to retrieve directories.
+    global_plotting_opts:
+        Dictionary containing all common plotting options.
+    regions:
+        List of regions to be processed.
+    metrics:
+        List of metrics to be processed.
+    plot_score_maps:
+        Whether the score maps should be plotted.
+    """
+    try:
+        type_ = run.get("type", "zarr")
+        reader = (
+            WeatherGenReader(run, run_id, private_paths)
+            if type_ == "zarr"
+            else CsvReader(run, run_id, private_paths)
+        )
+        stream_dict = reader.get_stream(stream)
+        if not stream_dict:
+            return run_id, stream, {}

-def evaluate_from_config(cfg, mlflow_client: MlflowClient | None) -> None:
-    # load configuration
+        # Parallel plotting
+        if stream_dict.get("plotting"):
+            plot_data(reader, stream, global_plotting_opts)

-    runs = cfg.run_ids
+        # Scoring per stream
+        if not stream_dict.get("evaluation"):
+            return run_id, stream, {}

-    _logger.info(f"Detected {len(runs)} runs")
+        stream_scores = calc_scores_per_stream(reader, stream, regions, metrics, plot_score_maps)
+
+        return run_id, stream, stream_scores

-    # Directory to store the summary plots
-    private_paths = cfg.get("private_paths", None)
-    summary_dir = Path(
-        cfg.evaluation.get("summary_dir", _DEFAULT_PLOT_DIR)
-    )  # base directory where summary plots will be stored
+    except Exception as e:
+        _logger.error(f"Error processing {run_id} - {stream}: {e}")
+        return run_id, stream, {}
+

+# Weird typing error from Python: mp.Queue is seen as a method with a "|" operator,
+# so "mp.Queue | None" fails without the string annotation used below.
+def evaluate_from_config(
+    cfg: dict, mlflow_client: MlflowClient | None, log_queue: "mp.Queue | None"
+) -> None:
+    """
+    Main function that controls evaluation plotting and scoring.
+ Parameters + ---------- + cfg: + Configuration input stored as dictionary. + """ + runs = cfg.run_ids + _logger.info(f"Detected {len(runs)} runs") + private_paths = cfg.get("private_paths") + summary_dir = Path(cfg.evaluation.get("summary_dir", _DEFAULT_PLOT_DIR)) metrics = cfg.evaluation.metrics regions = cfg.evaluation.get("regions", ["global"]) plot_score_maps = cfg.evaluation.get("plot_score_maps", False) - global_plotting_opts = cfg.get("global_plotting_options", {}) + use_parallel = cfg.evaluation.get("num_processes", 0) + if use_parallel == "auto": + num_processes = mp.cpu_count() + elif isinstance(use_parallel, int): + if use_parallel > 0: + num_processes = min(use_parallel, mp.cpu_count()) + else: + # Using the main process only + num_processes = 0 + else: + raise ValueError("parallel option must be 'auto' or an non-negative integer") - # to get a structure like: scores_dict[metric][region][stream][run_id] = plot - scores_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) + if num_processes > 1: + _logger.info("Using %d processes for evaluation", num_processes) + else: + _logger.info("Using main process for evaluation") - for run_id, run in runs.items(): - _logger.info(f"RUN {run_id}: Getting data...") - - type = run.get("type", "zarr") - if type == "zarr": - reader = WeatherGenReader(run, run_id, private_paths) - elif type == "csv": - reader = CsvReader(run, run_id, private_paths) - else: - raise ValueError(f"Unknown run type {type} for run {run_id}. Supported: zarr, csv.") + scores_dict = defaultdict(triple_nested_dict) # metric -> region -> stream -> run + tasks = [] + # Build tasks per stream + for run_id, run in runs.items(): + type_ = run.get("type", "zarr") + reader = ( + WeatherGenReader(run, run_id, private_paths) + if type_ == "zarr" + else CsvReader(run, run_id, private_paths) + ) for stream in reader.streams: - _logger.info(f"RUN {run_id}: Processing stream {stream}...") - - stream_dict = reader.get_stream(stream) - if not stream_dict: - _logger.info( - f"Stream {stream} does not exist in source data or config file is empty. " - "Skipping." - ) - continue - - if stream_dict.get("plotting"): - _logger.info(f"RUN {run_id}: Plotting stream {stream}...") - _ = plot_data(reader, stream, global_plotting_opts) - - if stream_dict.get("evaluation"): - _logger.info(f"Retrieve or compute scores for {run_id} - {stream}...") - - for region in regions: - metrics_to_compute = [] - - for metric in metrics: - metric_data = reader.load_scores( - stream, - region, - metric, - ) - - if metric_data is None or plot_score_maps: - metrics_to_compute.append(metric) - continue - - available_data = reader.check_availability( - stream, metric_data, mode="evaluation" - ) - - if not available_data.score_availability: - metrics_to_compute.append(metric) - else: - # simply select the chosen eval channels, samples, fsteps here... 
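The num_processes handling above accepts either the string "auto" or an integer, with 0 meaning "run in the main process". An illustrative restatement of that resolution logic, under the assumption that the behaviour is exactly as added in this patch (resolve_num_processes is a hypothetical name):

import multiprocessing as mp

def resolve_num_processes(opt: int | str) -> int:
    if opt == "auto":
        return mp.cpu_count()
    if isinstance(opt, int):
        # positive values are capped at the machine's core count; 0 keeps
        # everything in the main process
        return min(opt, mp.cpu_count()) if opt > 0 else 0
    raise ValueError("num_processes must be 'auto' or a non-negative integer")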
- scores_dict[metric][region][stream][run_id] = metric_data.sel( - sample=available_data.samples, - channel=available_data.channels, - forecast_step=available_data.fsteps, - ) - - if metrics_to_compute: - all_metrics, points_per_sample = calc_scores_per_stream( - reader, stream, region, metrics_to_compute, plot_score_maps - ) - - metric_list_to_json( - reader, - [all_metrics], - [points_per_sample], - [stream], - region, - ) - - for metric in metrics_to_compute: - scores_dict[metric][region][stream][run_id] = all_metrics.sel( - {"metric": metric} - ) + tasks.append( + { + "run_id": run_id, + "run": run, + "stream": stream, + "private_paths": private_paths, + "global_plotting_opts": global_plotting_opts, + "regions": regions, + "metrics": metrics, + "plot_score_maps": plot_score_maps, + } + ) - if mlflow_client: - # Reorder scores_dict to push to MLFlow per run_id: - # Create a new defaultdict with the target structure: [run_id][metric][region][stream] - reordered_dict: dict[str, dict[str, dict[str, dict[str, DataArray]]]] = defaultdict( - lambda: defaultdict(lambda: defaultdict(dict)) - ) + scores_dict = defaultdict(lambda: defaultdict(lambda: defaultdict(dict))) + if num_processes == 0: + if log_queue is not None: + setup_worker_logger(log_queue) + results = [_process_stream(**task) for task in tasks] + else: + with mp.Pool( + processes=num_processes, + initializer=setup_worker_logger, + initargs=(log_queue,), + ) as pool: + results = pool.map( + _process_stream_wrapper, + tasks, + ) + + for _, stream, stream_scores in results: + for metric, regions_dict in stream_scores.items(): + for region, streams_dict in regions_dict.items(): + for stream, runs_dict in streams_dict.items(): + scores_dict[metric][region][stream].update(runs_dict) - # Iterate through the original dictionary to get all keys and the final value + # MLFlow logging + if mlflow_client: + reordered_dict = defaultdict(triple_nested_dict) for metric, regions_dict in scores_dict.items(): for region, streams_dict in regions_dict.items(): for stream, runs_dict in streams_dict.items(): - for run_id, final_dict in runs_dict.items(): - # Assign the final_dict to the new structure using the reordered keys - reordered_dict[run_id][metric][region][stream] = final_dict + for run_id, data in runs_dict.items(): + reordered_dict[run_id][metric][region][stream] = data channels_set = collect_channels(scores_dict, metric, region, runs) @@ -211,18 +322,18 @@ def evaluate_from_config(cfg, mlflow_client: MlflowClient | None) -> None: run_name=f"{phase}_{from_run_id}_{run_id}", parent_run_id=parent_run.info.run_id, nested=True, - ) as run: + ) as mlflow_run: mlflow.set_tags(MlFlowUpload.run_tags(run_id, phase, from_run_id)) log_scores( reordered_dict[run_id], mlflow_client, - run.info.run_id, + mlflow_run.info.run_id, channels_set, ) - # plot summary + # summary plots if scores_dict and cfg.evaluation.get("summary_plots", True): - _logger.info("Started creating summary plots..") + _logger.info("Started creating summary plots...") plot_summary(cfg, scores_dict, summary_dir) diff --git a/packages/evaluate/src/weathergen/evaluate/score.py b/packages/evaluate/src/weathergen/evaluate/scores/score.py similarity index 99% rename from packages/evaluate/src/weathergen/evaluate/score.py rename to packages/evaluate/src/weathergen/evaluate/scores/score.py index 88d48a098..3099b0bd4 100755 --- a/packages/evaluate/src/weathergen/evaluate/score.py +++ b/packages/evaluate/src/weathergen/evaluate/scores/score.py @@ -16,7 +16,7 @@ import xarray as xr from 
scipy.spatial import cKDTree -from weathergen.evaluate.score_utils import to_list +from weathergen.evaluate.scores.score_utils import to_list # from common.io import MockIO @@ -168,7 +168,7 @@ def __init__( Returns ------- """ - self._agg_dims_in = self._validate_agg_dims(agg_dims) + self._agg_dims = self._validate_agg_dims(agg_dims) self._ens_dim = self._validate_ens_dim(ens_dim) self.det_metrics_dict = { @@ -257,18 +257,17 @@ def get_score( }" ) - if self._agg_dims_in == "all": + if self._agg_dims == "all": # Aggregate over all dimensions of the prediction data self._agg_dims = list(data.prediction.dims) else: # Check if _agg_dims is in prediction data - for dim in self._agg_dims_in: + for dim in self._agg_dims: if dim not in data.prediction.dims: raise ValueError( f"Average dimension '{dim}' not found in prediction data " f"dimensions: {data.prediction.dims}" ) - self._agg_dims = self._agg_dims_in arg_names: list[str] = inspect.getfullargspec(f).args[1:] diff --git a/packages/evaluate/src/weathergen/evaluate/scores/score_utils.py b/packages/evaluate/src/weathergen/evaluate/scores/score_utils.py new file mode 100644 index 000000000..4ad793d07 --- /dev/null +++ b/packages/evaluate/src/weathergen/evaluate/scores/score_utils.py @@ -0,0 +1,38 @@ +# (C) Copyright 2025 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +# Standard library +import logging +from typing import Any + +# Third-party +from omegaconf.listconfig import ListConfig + +_logger = logging.getLogger(__name__) +_logger.setLevel(logging.INFO) + + +def to_list(obj: Any) -> list: + """ + Convert given object to list if obj is not already a list. Sets are also transformed to a list. + + Parameters + ---------- + obj : Any + The object to transform into a list. + Returns + ------- + list + A list containing the object, or the object itself if it was already a list. 
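The behaviour of to_list is easiest to see from a couple of illustrative calls (not part of the patch); sets are converted as well, but their element order is unspecified:

>>> to_list((1, 2))   # tuples (and sets, ListConfig) are converted
[1, 2]
>>> to_list("ab")     # any other scalar is wrapped
['ab']
>>> to_list([3])      # lists pass through unchanged
[3]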
+ """ + if isinstance(obj, set | tuple | ListConfig): + obj = list(obj) + elif not isinstance(obj, list): + obj = [obj] + return obj diff --git a/packages/evaluate/src/weathergen/evaluate/clim_utils.py b/packages/evaluate/src/weathergen/evaluate/utils/clim_utils.py similarity index 100% rename from packages/evaluate/src/weathergen/evaluate/clim_utils.py rename to packages/evaluate/src/weathergen/evaluate/utils/clim_utils.py diff --git a/packages/evaluate/src/weathergen/evaluate/derived_channels.py b/packages/evaluate/src/weathergen/evaluate/utils/derived_channels.py similarity index 100% rename from packages/evaluate/src/weathergen/evaluate/derived_channels.py rename to packages/evaluate/src/weathergen/evaluate/utils/derived_channels.py diff --git a/packages/evaluate/src/weathergen/evaluate/score_utils.py b/packages/evaluate/src/weathergen/evaluate/utils/regions.py similarity index 86% rename from packages/evaluate/src/weathergen/evaluate/score_utils.py rename to packages/evaluate/src/weathergen/evaluate/utils/regions.py index a6339d009..db631a6af 100644 --- a/packages/evaluate/src/weathergen/evaluate/score_utils.py +++ b/packages/evaluate/src/weathergen/evaluate/utils/regions.py @@ -9,35 +9,14 @@ import logging from dataclasses import dataclass -from typing import Any, ClassVar +from typing import ClassVar import xarray as xr -from omegaconf.listconfig import ListConfig _logger = logging.getLogger(__name__) _logger.setLevel(logging.INFO) -def to_list(obj: Any) -> list: - """ - Convert given object to list if obj is not already a list. Sets are also transformed to a list. - - Parameters - ---------- - obj : Any - The object to transform into a list. - Returns - ------- - list - A list containing the object, or the object itself if it was already a list. - """ - if isinstance(obj, set | tuple | ListConfig): - obj = list(obj) - elif not isinstance(obj, list): - obj = [obj] - return obj - - class RegionLibrary: """ Predefined bounding boxes for known regions. @@ -48,6 +27,7 @@ class RegionLibrary: "nhem": (0.0, 90.0, -180.0, 180.0), "shem": (-90.0, 0.0, -180.0, 180.0), "tropics": (-30.0, 30.0, -180.0, 180.0), + "belgium": (49, 52, 2, 7), } diff --git a/packages/evaluate/src/weathergen/evaluate/utils.py b/packages/evaluate/src/weathergen/evaluate/utils/utils.py similarity index 78% rename from packages/evaluate/src/weathergen/evaluate/utils.py rename to packages/evaluate/src/weathergen/evaluate/utils/utils.py index 09ff2e9b7..0ecf9293a 100644 --- a/packages/evaluate/src/weathergen/evaluate/utils.py +++ b/packages/evaluate/src/weathergen/evaluate/utils/utils.py @@ -7,24 +7,29 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. 
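The RegionLibrary boxes above are (lat_min, lat_max, lon_min, lon_max) tuples; in the package itself, RegionBoundingBox.from_region_name / apply_mask do the actual masking. A hedged stand-in showing what such a box selects (in_box is a hypothetical helper, not the real API):

import numpy as np

BELGIUM = (49.0, 52.0, 2.0, 7.0)  # (lat_min, lat_max, lon_min, lon_max)

def in_box(lat: np.ndarray, lon: np.ndarray, box: tuple = BELGIUM) -> np.ndarray:
    lat_min, lat_max, lon_min, lon_max = box
    # boolean mask of points falling inside the bounding box
    return (lat >= lat_min) & (lat <= lat_max) & (lon >= lon_min) & (lon <= lon_max)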
+# Standard library import json import logging +from collections import defaultdict from pathlib import Path +# Third-party import numpy as np import omegaconf as oc import xarray as xr from tqdm import tqdm -from weathergen.evaluate.clim_utils import get_climatology -from weathergen.evaluate.io_reader import Reader -from weathergen.evaluate.plot_utils import ( +# Local application / package +from weathergen.evaluate.io.io_reader import Reader +from weathergen.evaluate.plotting.plot_utils import ( bar_plot_metric_region, plot_metric_region, score_card_metric_region, ) -from weathergen.evaluate.plotter import BarPlots, LinePlots, Plotter, ScoreCards -from weathergen.evaluate.score import VerifiedData, get_score +from weathergen.evaluate.plotting.plotter import BarPlots, LinePlots, Plotter, ScoreCards +from weathergen.evaluate.scores.score import VerifiedData, get_score +from weathergen.evaluate.utils.clim_utils import get_climatology +from weathergen.evaluate.utils.regions import RegionBoundingBox _logger = logging.getLogger(__name__) _logger.setLevel(logging.INFO) @@ -47,13 +52,7 @@ def get_next_data(fstep, da_preds, da_tars, fsteps): return preds_next, tars_next -def calc_scores_per_stream( - reader: Reader, - stream: str, - region: str, - metrics: list[str], - plot_score_maps: bool = False, -) -> tuple[xr.DataArray, xr.DataArray]: +def calc_scores_per_stream(reader, stream, regions, metrics, plot_score_maps=False): """ Calculate scores for a given run and stream using the specified metrics. @@ -63,8 +62,10 @@ def calc_scores_per_stream( Reader object containing all info about a particular run. stream : Stream name to calculate scores for. - region : - Region name to calculate scores for. + scores_dict: + Dictionary for scores with structure scores_dict[metric][region][stream][run_id] + regions : + List of regions to calculate scores on. metrics : List of metric names to calculate. plot_score_maps : @@ -75,8 +76,9 @@ def calc_scores_per_stream( the "ipoint" dimension. Returns ------- - Tuple of xarray DataArray containing the scores and the number of points per sample. + Dictionary containing scores for each metric and stream. """ + local_scores = {} # top-level dict: metric -> region -> stream -> run_id _logger.info(f"RUN {reader.run_id} - {stream}: Calculating scores for metrics {metrics}...") if plot_score_maps: @@ -88,7 +90,6 @@ def calc_scores_per_stream( _logger.info(f"RUN {reader.run_id} - {stream}: Saving plotted scores to {map_dir}") available_data = reader.check_availability(stream, mode="evaluation") - fsteps = available_data.fsteps samples = available_data.samples channels = available_data.channels @@ -98,102 +99,109 @@ def calc_scores_per_stream( output_data = reader.get_data( stream, - region=region, fsteps=fsteps, samples=samples, channels=channels, ensemble=ensemble, return_counts=True, ) - da_preds = output_data.prediction da_tars = output_data.target - points_per_sample = output_data.points_per_sample aligned_clim_data = get_climatology(reader, da_tars, stream) - metric_stream = xr.DataArray( - np.full( - (len(samples), len(fsteps), len(channels), len(metrics), len(ensemble)), - np.nan, - ), - coords={ - "sample": samples, - "forecast_step": fsteps, - "channel": channels, - "metric": metrics, - "ens": ensemble, - }, - ) - - for (fstep, tars), (_, preds) in zip(da_tars.items(), da_preds.items(), strict=False): - if preds.ipoint.size == 0: - _logger.warning( - f"No data for stream {stream} at fstep {fstep} in region {region}. Skipping." 
- ) - continue - - _logger.debug(f"Verifying data for stream {stream}...") - - preds_next, tars_next = get_next_data(fstep, da_preds, da_tars, fsteps) - - climatology = aligned_clim_data[fstep] if aligned_clim_data else None - score_data = VerifiedData(preds, tars, preds_next, tars_next, climatology) - # Build up computation graphs for all metrics - _logger.debug(f"Build computation graphs for metrics for stream {stream}...") - - # Add it only if it is not None - valid_scores = [] - for metric in metrics: - score = get_score( - score_data, - metric, - agg_dims="ipoint", - group_by_coord=group_by_coord, - ) - if score is not None: - valid_scores.append(score) - - # Keep only metrics corresponding to valid_scores - valid_metric_names = [ - metric - for metric, score in zip(metrics, valid_scores, strict=False) - if score is not None - ] + for region in regions: + bbox = RegionBoundingBox.from_region_name(region) + + metric_stream = xr.DataArray( + np.full( + (len(samples), len(fsteps), len(channels), len(metrics), len(ensemble)), + np.nan, + ), + coords={ + "sample": samples, + "forecast_step": fsteps, + "channel": channels, + "metric": metrics, + "ens": ensemble, + }, + ) - combined_metrics = xr.concat(valid_scores, dim="metric") - combined_metrics = combined_metrics.assign_coords(metric=valid_metric_names) + for (fstep, tars), (_, preds) in zip(da_tars.items(), da_preds.items(), strict=False): + if preds.ipoint.size == 0: + _logger.warning( + f"No data for stream {stream} at fstep {fstep} in region {region}. Skipping." + ) + continue - _logger.debug(f"Running computation of metrics for stream {stream}...") - combined_metrics = combined_metrics.compute() + _logger.debug(f"Verifying data for stream {stream}...") - for coord in ["channel", "sample", "ens"]: - combined_metrics = scalar_coord_to_dim(combined_metrics, coord) + preds_next, tars_next = get_next_data(fstep, da_preds, da_tars, fsteps) - assert int(combined_metrics.forecast_step) == int(fstep), ( - "Different steps in data and metrics. Please check." - ) + if region != "global": + _logger.debug( + f"Applying bounding box mask for region '{region}' to targets and predictions." 
+ ) - criteria = { - "forecast_step": int(combined_metrics.forecast_step), - "sample": combined_metrics.sample, - "channel": combined_metrics.channel, - "metric": combined_metrics.metric, - } + tars, preds, tars_next, preds_next = [ + bbox.apply_mask(x) if x is not None else None + for x in (tars, preds, tars_next, preds_next) + ] + climatology = aligned_clim_data[fstep] if aligned_clim_data else None + score_data = VerifiedData(preds, tars, preds_next, tars_next, climatology) + # Build up computation graphs for all metrics + _logger.debug(f"Build computation graphs for metrics for stream {stream}...") + + # Add it only if it is not None + valid_scores = [] + for metric in metrics: + score = get_score( + score_data, metric, agg_dims="ipoint", group_by_coord=group_by_coord + ) + if score is not None: + valid_scores.append(score) + + valid_metric_names = [ + metric + for metric, score in zip(metrics, valid_scores, strict=False) + if score is not None + ] + if not valid_scores: + continue + + combined_metrics = xr.concat(valid_scores, dim="metric") + combined_metrics = combined_metrics.assign_coords(metric=valid_metric_names) + combined_metrics = combined_metrics.compute() + + for coord in ["channel", "sample", "ens"]: + combined_metrics = scalar_coord_to_dim(combined_metrics, coord) + + criteria = { + "forecast_step": int(fstep), + "sample": combined_metrics.sample, + "channel": combined_metrics.channel, + "metric": combined_metrics.metric, + } + if "ens" in combined_metrics.dims: + criteria["ens"] = combined_metrics.ens - if "ens" in combined_metrics.dims: - criteria["ens"] = combined_metrics.ens - metric_stream.loc[criteria] = combined_metrics + metric_stream.loc[criteria] = combined_metrics - ######### + if is_regular and plot_score_maps: + _logger.info(f"Plotting scores on a map {stream} - forecast step: {fstep}...") + _plot_score_maps_per_stream( + reader, map_dir, stream, region, score_data, metrics, fstep + ) - if is_regular and plot_score_maps: - _logger.info(f"Plotting scores on a map {stream} - forecast step: {fstep}...") - _plot_score_maps_per_stream(reader, map_dir, stream, region, score_data, metrics, fstep) + _logger.info(f"Scores for run {reader.run_id} - {stream} calculated successfully.") - _logger.info(f"Scores for run {reader.run_id} - {stream} calculated successfully.") + # Build local dictionary for this region + for metric in metrics: + local_scores.setdefault(metric, {}).setdefault(region, {}).setdefault(stream, {})[ + reader.run_id + ] = metric_stream.sel({"metric": metric}) - return metric_stream, points_per_sample + return local_scores def _plot_score_maps_per_stream( @@ -262,7 +270,7 @@ def _plot_score_maps_per_stream( for metric in plot_metrics.coords["metric"].values: for ens_val in tqdm(ens_values, f"Plotting metric - {metric}"): - tag = f"score_maps_{region}_{metric}_fstep_{fstep}" + ( + tag = f"score_maps_{metric}_fstep_{fstep}" + ( f"_ens_{ens_val}" if ens_val is not None else "" ) for channel in plot_metrics.coords["channel"].values: @@ -274,7 +282,7 @@ def _plot_score_maps_per_stream( title = f"{metric} - {channel}: fstep {fstep}" + ( f", ens {ens_val}" if ens_val is not None else "" ) - plotter.scatter_plot(data, map_dir, channel, tag=tag, title=title) + plotter.scatter_plot(data, map_dir, channel, region, tag=tag, title=title) def plot_data(reader: Reader, stream: str, global_plotting_opts: dict) -> None: @@ -314,9 +322,9 @@ def plot_data(reader: Reader, stream: str, global_plotting_opts: dict) -> None: "dpi_val": global_plotting_opts.get("dpi_val", 300), 
"fig_size": global_plotting_opts.get("fig_size", (8, 10)), "fps": global_plotting_opts.get("fps", 2), + "regions": global_plotting_opts.get("regions", ["global"]), "plot_subtimesteps": reader.get_inference_stream_attr(stream, "tokenize_spacetime", False), } - plotter = Plotter(plotter_cfg, reader.runplot_dir) available_data = reader.check_availability(stream, mode="plotting") @@ -501,6 +509,7 @@ def plot_summary(cfg: dict, scores_dict: dict, summary_dir: Path): "log_scale": eval_opt.get("log_scale", False), "add_grid": eval_opt.get("add_grid", False), "plot_ensemble": eval_opt.get("plot_ensemble", False), + "baseline": eval_opt.get("baseline", None), } plotter = LinePlots(plot_cfg, summary_dir) @@ -645,3 +654,13 @@ def scalar_coord_to_dim(da: xr.DataArray, name: str, axis: int = -1) -> xr.DataA da = da.drop_vars(name) da = da.expand_dims({name: [val]}, axis=axis) return da + + +def nested_dict(): + """Two-level nested dict factory: dict[key1][key2] = value""" + return defaultdict(dict) + + +def triple_nested_dict(): + """Three-level nested dict factory: dict[key1][key2][key3] = value""" + return defaultdict(nested_dict) From a6e07a40071fd24ae31fc5902e3c594e4b743236 Mon Sep 17 00:00:00 2001 From: Sorcha Owens <73587207+enssow@users.noreply.github.com> Date: Wed, 10 Dec 2025 08:23:33 +0000 Subject: [PATCH 12/24] [1440] update config_zarr2cf.yaml (#1441) * update config * linting * Revert "update config" This reverts commit fbf35c6475dedac0af45571e5bc28d4c1e92c903. * Revert "linting" This reverts commit 94dd2cc2a3d25654bffb73c3902f5f93d532b9ba. * messed up revert --- config/evaluate/config_zarr2cf.yaml | 41 ++++++++++++++--------------- 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/config/evaluate/config_zarr2cf.yaml b/config/evaluate/config_zarr2cf.yaml index cb111954a..3f28cc0b1 100644 --- a/config/evaluate/config_zarr2cf.yaml +++ b/config/evaluate/config_zarr2cf.yaml @@ -3,39 +3,40 @@ variables: q: var: q - long: specific_humidity_on_pressure_level + long: specific_humidity_at_pressure_levels std: specific_humidity wg_unit: kg kg**-1 std_unit: kg kg-1 level_type: pl t: var: t - long: temperature_on_pressure_levels + long: temperature_at_pressure_levels std: air_temperature wg_unit: K std_unit: K level_type: pl u: var: u - long: u_wind_on_pressure_levels + long: u_wind_at_pressure_levels std: x_wind wg_unit: m s**-1 std_unit: m s-1 level_type: pl v: var: v - long: v_wind_on_pressure_levels + long: v_wind_at_pressure_levels std: y_wind wg_unit: m s**-1 std_unit: m s-1 level_type: pl z: var: z - long: geopotential_height_on_pressure_levels - std: geopotential_height + long: geopotential_at_pressure_levels + std: geopotential wg_unit: m**2 s**-2 - std_unit: m + std_unit: m2 s-2 level_type: pl + scale_factor: 1/9.80665 10u: var: u10 long: u_wind_at_10m @@ -52,14 +53,14 @@ variables: level_type: sfc 2d: var: d2m - long: 2m_dewpoint_temperature + long: dew_point_temperature_at_screen_level std: dew_point_temperature wg_unit: K std_unit: K level_type: sfc 2t: var: t2m - long: 2m_temperature + long: temperature_at_screen_level std: air_temperature #near-surface (usually, 2 meter) : https://pcmdi.llnl.gov/mips/cmip3/variableList.html wg_unit: K @@ -97,7 +98,7 @@ coordinates: lon: longitude stream: stream forecast_step: forecast_period - forecast_ref_time: forecast_ref_time + forecast_reference_time: forecast_reference_time ncells: ncells pl: pressure_level: pressure @@ -106,7 +107,7 @@ coordinates: lon: longitude stream: stream forecast_step: forecast_period - forecast_ref_time: 
forecast_ref_time + forecast_reference_time: forecast_reference_time ncells: ncells dimensions: @@ -121,23 +122,21 @@ dimensions: wg: longitude std: longitude std_unit: degrees_east - pressure_level: + pressure_level: wg: pressure - std: air_pressure + std: pressure std_unit: hPa - forecast_ref_time: - wg: forecast_ref_time - std: forecast_ref_time + forecast_reference_time: + wg: forecast_reference_time + std: forecast_reference_time forecast_step: wg: forecast_period std: forecast_period + long: time since forecast_reference_time + std_unit: hours stream: wg: stream std: stream ncells: wg: ncells - std: ncells - #TODO maybe absorb stream as an attribute - # forecast_step: forecast_step - # don't want forecast_step anyway - + std: ncells \ No newline at end of file From 3404bd65d576ce53f86d07e94f50c7061a1c5c1a Mon Sep 17 00:00:00 2001 From: wesselkamp1 Date: Tue, 16 Dec 2025 15:41:50 +0100 Subject: [PATCH 13/24] initial data reader on juwels --- config/streams/seviri_lst/era5.yml | 36 ++++++++++ config/streams/seviri_lst/seviri_lst.yml | 18 +++-- src/weathergen/datasets/data_reader_base.py | 1 + src/weathergen/datasets/data_reader_seviri.py | 71 +++++++++++++------ .../datasets/multi_stream_data_sampler.py | 3 + 5 files changed, 104 insertions(+), 25 deletions(-) create mode 100644 config/streams/seviri_lst/era5.yml diff --git a/config/streams/seviri_lst/era5.yml b/config/streams/seviri_lst/era5.yml new file mode 100644 index 000000000..89b07f1f3 --- /dev/null +++ b/config/streams/seviri_lst/era5.yml @@ -0,0 +1,36 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. + +ERA5 : + type : anemoi + filenames : ['aifs-ea-an-oper-0001-mars-o96-1979-2022-6h-v6.zarr'] + source_exclude : ['w_', 'skt', 'tcw', 'cp', 'tp'] + target_exclude : ['w_', 'slor', 'sdor', 'tcw', 'cp', 'tp'] + loss_weight : 1. + masking_rate : 0.6 + masking_rate_none : 0.05 + token_size : 8 + tokenize_spacetime : True + max_num_targets: -1 + embed : + net : transformer + num_tokens : 1 + num_heads : 8 + dim_embed : 256 + num_blocks : 2 + embed_target_coords : + net : linear + dim_embed : 256 + target_readout : + type : 'obs_value' # token or obs_value + num_layers : 2 + num_heads : 4 + pred_head : + ens_size : 1 + num_layers : 1 diff --git a/config/streams/seviri_lst/seviri_lst.yml b/config/streams/seviri_lst/seviri_lst.yml index 9a185fa22..b3f6aeec4 100644 --- a/config/streams/seviri_lst/seviri_lst.yml +++ b/config/streams/seviri_lst/seviri_lst.yml @@ -1,13 +1,23 @@ +# (C) Copyright 2024 WeatherGenerator contributors. +# +# This software is licensed under the terms of the Apache Licence Version 2.0 +# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0. +# +# In applying this licence, ECMWF does not waive the privileges and immunities +# granted to it by virtue of its status as an intergovernmental organisation +# nor does it submit to any jurisdiction. 
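The new scale_factor entry for z in the zarr2cf mapping above encodes the standard conversion between geopotential (m**2 s**-2) and geopotential height (m). A sketch of the implied arithmetic, assuming the converter multiplies raw values by the parsed factor (apply_scale is a hypothetical name):

G0 = 9.80665  # standard gravity [m s**-2]

def apply_scale(z_geopotential: float, scale_factor: float = 1 / G0) -> float:
    # geopotential [m**2 s**-2] * (1 / g0) -> geopotential height [m]
    return z_geopotential * scale_factor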
+ + SEVIRI_LST : type : msg_lst filenames : ['mpg_seviri_2017_lst_v0'] data_start_time : "2017-01-01 00:00" data_end_time : "2017-12-31 00:00" - source_exclude: [] + source_exclude: [] #["LANDCOV", "quality_flag", "LST", "FVC", "LW_MASK"] target_exclude: ["LANDCOV", "quality_flag", "DEM", "FVC", "LW_MASK"] target: ["LST"] source: [] - geoinfos: ["DEM", "LANDCOV"] + geoinfos: [] #["DEM"] # "LANDCOV" metadata: "/p/scratch/weatherai/shared/weather_generator_data/mpg_seviri_2017_lst_v0/metadata" experiment: "johannesburg" loss_weight : 1.0 @@ -15,7 +25,7 @@ SEVIRI_LST : masking_rate_none : 0.05 token_size : 64 tokenize_spacetime : True - max_num_targets: -1 + max_num_targets: -1 #-1 embed : net : transformer num_tokens : 1 @@ -28,7 +38,7 @@ SEVIRI_LST : target_readout : type : 'obs_value' num_layers : 2 - num_heads : 8 + num_heads : 4 pred_head : ens_size : 1 num_layers : 1 \ No newline at end of file diff --git a/src/weathergen/datasets/data_reader_base.py b/src/weathergen/datasets/data_reader_base.py index 2b1bc9509..e16eb5834 100644 --- a/src/weathergen/datasets/data_reader_base.py +++ b/src/weathergen/datasets/data_reader_base.py @@ -577,6 +577,7 @@ def normalize_target_channels(self, target: NDArray[DType]) -> NDArray[DType]: Normalized data """ assert target.shape[-1] == len(self.target_idx), "incorrect number of target channels" + for i, ch in enumerate(self.target_idx): target[..., i] = (target[..., i] - self.mean[ch]) / self.stdev[ch] diff --git a/src/weathergen/datasets/data_reader_seviri.py b/src/weathergen/datasets/data_reader_seviri.py index 0f30faa76..7eca412bd 100644 --- a/src/weathergen/datasets/data_reader_seviri.py +++ b/src/weathergen/datasets/data_reader_seviri.py @@ -67,10 +67,13 @@ def __init__( assert False, "Frequency sub-sampling currently not supported" # checks length of time in dataset - data_start_time = time_ds.time[0].values - data_end_time = time_ds.time[20].values + idx_start = 0 + idx_end = 120 # len(time_ds.time) - 1 + data_start_time = time_ds.time[idx_start].values + data_end_time = time_ds.time[idx_end].values period = (data_end_time - data_start_time) + print("Data period: ", period) assert data_start_time is not None and data_end_time is not None, ( data_start_time, @@ -92,11 +95,13 @@ def __init__( return else: self.ds = ds - self.len = len(ds) + self.len = idx_end - idx_start #len(ds) - self.channels_file = [k for k in self.ds.keys()] + self.exclude = {"LWMASK", "_indices", "quality_flag"} # exclude these from channels because we don't have a statistics for them + self.channels_file = [k for k in self.ds.keys()] # caches lats and lons + # if you want a spatial subset, do it here lat_name = stream_info.get("latitude_name", "latitude") self.latitudes = _clip_lat(np.array(ds[lat_name], dtype=np32)) lon_name = stream_info.get("longitude_name", "longitude") @@ -106,36 +111,54 @@ def __init__( self.geoinfo_channels = stream_info.get("geoinfos", []) self.geoinfo_idx = [self.channels_file.index(ch) for ch in self.geoinfo_channels] # cache geoinfos - self.geoinfo_data = np.stack([np.array(ds[ch], dtype=np32) for ch in self.geoinfo_channels]) - self.geoinfo_data = self.geoinfo_data.transpose() + #self.geoinfo_data = np.stack([np.array(ds[ch], dtype=np32) for ch in self.geoinfo_channels]) + #self.geoinfo_data = self.geoinfo_data.transpose() # select/filter requested target channels # this will access the stream info, hence make sure to set it. 
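The SEVIRI reader above falls back to mean 0 and standard deviation 1 for channels without statistics, so the standard per-channel normalization becomes a no-op for them. An illustrative restatement of that normalization, following the same formula as normalize_target_channels in data_reader_base.py (names here are placeholders):

import numpy as np

def normalize(data: np.ndarray, mean: np.ndarray, stdev: np.ndarray, idx: list[int]) -> np.ndarray:
    # channel i of `data` corresponds to file channel idx[i]
    for i, ch in enumerate(idx):
        data[..., i] = (data[..., i] - mean[ch]) / stdev[ch]
    return data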
- self.target_idx = self.select_channels(ds, "target") - self.target_channels = [self.channels_file[i] for i in self.target_idx] + self.target_idx, self.target_channels = self.select_channels(ds, "target") + #self.target_channels = [self.channels_file[i] for i in self.target_idx] - self.source_idx = self.select_channels(ds, "source") - self.source_channels = [self.channels_file[i] for i in self.source_idx] + self.source_idx, self.source_channels = self.select_channels(ds, "source") + #self.source_channels = [self.channels_file[i] for i in self.source_idx] + #print("Source channels:", self.source_channels) ds_name = stream_info["name"] _logger.info(f"{ds_name}: target channels: {self.target_channels}") + # what is this doing? self.properties = { "stream_id": 0, } # or your function to load or compute the statistics - self._create_statistics_lookup() + self.mean, self.stdev = self._create_statistics() - self.mean, self.stdev = self.mean_lookup[self.target_channels].values.astype(np32), self.std_lookup[self.target_channels].values.astype(np32) + self.mean_geoinfo, self.stdev_geoinfo = self.mean[self.geoinfo_idx], self.stdev[self.geoinfo_idx] - self.mean_geoinfo, self.stdev_geoinfo = self.mean_lookup[self.geoinfo_channels].values.astype(np32), self.std_lookup[self.geoinfo_channels].values.astype(np32) - - def _create_statistics_lookup(self): + def _create_statistics(self): statistics = Path(self.stream_info["metadata"]) / self.stream_info["experiment"] / "seviri_statistics.parquet" df_stats = pd.read_parquet(statistics) - self.mean_lookup = df_stats.set_index('variable')["mean"] - self.std_lookup = df_stats.set_index('variable')["std"] + mean_lookup = df_stats.set_index('variable')["mean"] + std_lookup = df_stats.set_index('variable')["std"] + + mean, stdev = [], [] + + for ch in self.channels_file: + if ch in self.exclude: + mean.append(0.0) # placeholder for those we don't have statistics for + stdev.append(1.0) + else: + mean.append(mean_lookup[ch].astype(np.float32)) + stdev.append(std_lookup[ch].astype(np.float32)) + + mean = np.array(mean) + stdev = np.array(stdev) + + print("Mean shape", mean.shape) + print("Means", mean) + + return mean, stdev @override def init_empty(self) -> None: @@ -148,14 +171,14 @@ def length(self) -> int: return self.len @override - def _get(self, idx: TIndex) -> ReaderData: + def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: """ Get data for window (for either source or target, through public interface) Parameters ---------- idx : int Index of temporal window - channels_idx : np.array + channels_idx : list[int] Selection of channels Returns ------- @@ -224,9 +247,15 @@ def select_channels(self, ds, ch_type: str) -> NDArray[np.int64]: stream_name = self.stream_info["name"] _logger.warning(f"No channel for {stream_name} for {ch_type}.") - chs_idx = np.sort([self.channels_file.index(ch) for ch in channels]) + if is_empty: + _logger.warning(f"No channel selected for {stream_name} for {ch_type}.") + chs_idx = np.empty(shape=[0], dtype=int) + channels = [] + else: + chs_idx = np.sort([self.channels_file.index(ch) for ch in channels]) + channels = [self.channels_file[i] for i in chs_idx] - return np.array(chs_idx) + return np.array(chs_idx), channels def _clip_lat(lats: NDArray) -> NDArray[np.float32]: diff --git a/src/weathergen/datasets/multi_stream_data_sampler.py b/src/weathergen/datasets/multi_stream_data_sampler.py index c1fc0ff05..db76066b6 100644 --- a/src/weathergen/datasets/multi_stream_data_sampler.py +++ 
b/src/weathergen/datasets/multi_stream_data_sampler.py @@ -13,6 +13,9 @@ import numpy as np import torch +# for interactive debugging +import code + from weathergen.common.io import IOReaderData from weathergen.datasets.data_reader_anemoi import DataReaderAnemoi from weathergen.datasets.data_reader_base import ( From e4c95e77ac3c7f625e813d4b5482e6aff9870641 Mon Sep 17 00:00:00 2001 From: Till Hauer Date: Tue, 20 Jan 2026 12:06:00 +0100 Subject: [PATCH 14/24] Add docstrings to facilitate newcomers getting started (#1465) * Add docstrings to facilitate newcomers (v01) * ruff * ruffed * small docstring changes * just docstrings without refactoring * Clarify coordinates parameter description in engines.py --------- Co-authored-by: Savvas Melidonis <79579567+SavvasMel@users.noreply.github.com> --- .../common/src/weathergen/common/config.py | 2 +- packages/dashboard/atmo_eval.py | 4 +- src/weathergen/model/attention.py | 208 ++++++++++++++++++ src/weathergen/model/engines.py | 158 +++++++++++-- src/weathergen/utils/validation_io.py | 2 +- 5 files changed, 352 insertions(+), 22 deletions(-) diff --git a/packages/common/src/weathergen/common/config.py b/packages/common/src/weathergen/common/config.py index c32732ba7..1b9e1928d 100644 --- a/packages/common/src/weathergen/common/config.py +++ b/packages/common/src/weathergen/common/config.py @@ -225,7 +225,7 @@ def load_config( # use OmegaConf.unsafe_merge if too slow c = OmegaConf.merge(base_config, private_config, *overwrite_configs) assert isinstance(c, Config) - + # Ensure the config has mini-epoch notation if hasattr(c, "samples_per_epoch"): c.samples_per_mini_epoch = c.samples_per_epoch diff --git a/packages/dashboard/atmo_eval.py b/packages/dashboard/atmo_eval.py index a98b32268..3dc077f6e 100644 --- a/packages/dashboard/atmo_eval.py +++ b/packages/dashboard/atmo_eval.py @@ -77,7 +77,9 @@ def get_score_step_48h(score_col: str) -> pl.DataFrame: .sort("start_time") .filter(pl.col(score_col).is_not_null()) ) - _logger.info(f"Getting score data for {score_col} at 48h (step={step_48h}): len={len(score_data)}") + _logger.info( + f"Getting score data for {score_col} at 48h (step={step_48h}): len={len(score_data)}" + ) # Iterate over the runs to get the metric at step 48h scores_dt: list[float | None] = [] diff --git a/src/weathergen/model/attention.py b/src/weathergen/model/attention.py index 39ed1c041..195889a0a 100644 --- a/src/weathergen/model/attention.py +++ b/src/weathergen/model/attention.py @@ -17,6 +17,14 @@ class MultiSelfAttentionHeadVarlen(torch.nn.Module): + """Multi-head self-attention with variable length sequences. + + This module implements multi-head self-attention for variable length sequences packed into a + single tensor. It leverages FlashAttention's variable length API (`flash_attn_varlen_func`) + to efficiently handle batches of sequences with differing lengths without padding, using + cumulative length indices to define sequence boundaries. + """ + def __init__( self, dim_embed, @@ -32,6 +40,21 @@ def __init__( norm_eps=1e-5, attention_dtype=torch.bfloat16, ): + """Initialize the MultiSelfAttentionHeadVarlen module. + + :param dim_embed: Embedding dimension. + :param num_heads: Number of attention heads. + :param dim_head_proj: Dimension of the projection head. + :param dropout_rate: Dropout rate. + :param with_residual: Whether to use residual connections. + :param with_qk_lnorm: Whether to use layer normalization for query and key. + :param with_flash: Whether to use flash attention. 
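The varlen attention classes documented below pack sequences of different lengths into one flat tensor and pass cumulative length indices to FlashAttention. A small sketch of that bookkeeping, assuming int32 offsets as flash_attn's varlen API expects (to_cu_seqlens is a hypothetical helper, not part of this module):

import torch

def to_cu_seqlens(lens: torch.Tensor) -> torch.Tensor:
    # [3, 5] -> [0, 3, 8]: start offsets of each packed sequence plus total length
    return torch.cat([lens.new_zeros(1), lens.cumsum(0)]).to(torch.int32)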
+ :param norm_type: Type of normalization. + :param softcap: Softcap for attention. + :param dim_aux: Dimension of auxiliary data. + :param norm_eps: Epsilon for normalization. + :param attention_dtype: Data type for attention. + """ super(MultiSelfAttentionHeadVarlen, self).__init__() self.num_heads = num_heads @@ -69,6 +92,14 @@ def __init__( assert with_flash, "Only flash attention supported at the moment" def forward(self, x, x_lens, ada_ln_aux=None): + """Forward pass of the MultiSelfAttentionHeadVarlen module. + + :param x: Input tensor. + :param x_lens: Lengths of the input sequences. + :param ada_ln_aux: Auxiliary data for adaptive layer normalization. + + :return out: Output tensor. + """ if self.with_residual: x_in = x x = self.lnorm(x) if ada_ln_aux is None else self.lnorm(x, ada_ln_aux) @@ -106,6 +137,14 @@ def forward(self, x, x_lens, ada_ln_aux=None): class MultiSelfAttentionHeadVarlenFlex(torch.nn.Module): + """Multi-head self-attention with variable length sequences and flex attention. + + This module implements multi-head self-attention using PyTorch's FlexAttention. It allows + for defining custom sparse attention patterns via a score modification function. This is + particularly useful for optimizing attention mechanisms where full NxN interactions are not + required or desired, enabling flexible and efficient attention computations. + """ + def __init__( self, dim_embed, @@ -120,6 +159,20 @@ def __init__( norm_eps=1e-5, attention_dtype=torch.bfloat16, ): + """Initialize the MultiSelfAttentionHeadVarlenFlex module. + + :param dim_embed: Embedding dimension. + :param num_heads: Number of attention heads. + :param dim_head_proj: Dimension of the projection head. + :param dropout_rate: Dropout rate. + :param with_residual: Whether to use residual connections. + :param with_qk_lnorm: Whether to use layer normalization for query and key. + :param with_flash: Whether to use flash attention. + :param norm_type: Type of normalization. + :param softcap: Softcap for attention. + :param norm_eps: Epsilon for normalization. + :param attention_dtype: Data type for attention. + """ super(MultiSelfAttentionHeadVarlenFlex, self).__init__() self.num_heads = num_heads @@ -160,6 +213,13 @@ def sparsity_mask(score, b, h, q_idx, kv_idx): self.compiled_flex_attention = torch.compile(att, dynamic=False) def forward(self, x, x_lens=None): + """Forward pass of the MultiSelfAttentionHeadVarlenFlex module. + + :param x: Input tensor. + :param x_lens: Lengths of the input sequences. + + :return out: Output tensor. + """ if self.with_residual: x_in = x x = self.lnorm(x) @@ -181,6 +241,14 @@ def forward(self, x, x_lens=None): class MultiSelfAttentionHeadLocal(torch.nn.Module): + """Multi-head self-attention with local (block-wise) attention. + + This module implements local (block-wise) multi-head self-attention. It restricts attention + to local blocks defined by `block_factor`, meaning tokens only attend to other tokens within + the same block. This effectively reduces the computational complexity from quadratic to + linear with respect to the sequence length (for a fixed block size), making it suitable for + processing long sequences where local interactions dominate.""" + def __init__( self, dim_embed, @@ -198,6 +266,23 @@ def __init__( norm_eps=1e-5, attention_dtype=torch.bfloat16, ): + """Initialize the MultiSelfAttentionHeadLocal module. + + :param dim_embed: Embedding dimension. + :param num_heads: Number of attention heads. + :param qkv_len: Length of the query, key and value. 
+ :param block_factor: Block factor. + :param dim_head_proj: Dimension of the projection head. + :param dropout_rate: Dropout rate. + :param with_residual: Whether to use residual connections. + :param with_qk_lnorm: Whether to use layer normalization for query and key. + :param with_flash: Whether to use flash attention. + :param norm_type: Type of normalization. + :param softcap: Softcap for attention. + :param dim_aux: Dimension of the auxiliary data. + :param norm_eps: Epsilon for normalization. + :param attention_dtype: Data type for attention. + """ super(MultiSelfAttentionHeadLocal, self).__init__() self.num_heads = num_heads @@ -243,6 +328,13 @@ def mask_block_local(batch, head, idx_q, idx_kv): self.flex_attention = torch.compile(flex_attention, dynamic=False) def forward(self, x, ada_ln_aux=None): + """Forward pass of the MultiSelfAttentionHeadLocal module. + + :param x: Input tensor. + :param ada_ln_aux: Auxiliary data for adaptive layer normalization. + + :return out: Output tensor. + """ if self.with_residual: x_in = x x = self.lnorm(x) if ada_ln_aux is None else self.lnorm(x, ada_ln_aux) @@ -263,6 +355,14 @@ def forward(self, x, ada_ln_aux=None): class MultiCrossAttentionHeadVarlen(torch.nn.Module): + """Multi-head cross-attention with variable length sequences. + + This module implements multi-head cross-attention for variable length sequences. Similar to + the self-attention variant, it uses FlashAttention (`flash_attn_varlen_func`) to handle + packed sequences of queries and keys/values with different lengths. It ensures correct masking + and efficient computation for cases where both source and target sequences vary in length. + """ + def __init__( self, dim_embed_q, @@ -279,6 +379,22 @@ def __init__( norm_eps=1e-5, attention_dtype=torch.bfloat16, ): + """Initialize the MultiCrossAttentionHeadVarlen module. + + :param dim_embed_q: Embedding dimension of the query. + :param dim_embed_kv: Embedding dimension of the key and value. + :param num_heads: Number of attention heads. + :param dim_head_proj: Dimension of the projection head. + :param dropout_rate: Dropout rate. + :param with_residual: Whether to use residual connections. + :param with_qk_lnorm: Whether to use layer normalization for query and key. + :param with_flash: Whether to use flash attention. + :param norm_type: Type of normalization. + :param softcap: Softcap for attention. + :param dim_aux: Dimension of the auxiliary data. + :param norm_eps: Epsilon for normalization. + :param attention_dtype: Data type for attention. + """ super(MultiCrossAttentionHeadVarlen, self).__init__() self.num_heads = num_heads @@ -321,6 +437,16 @@ def __init__( assert with_flash, "Only flash attention supported at the moment" def forward(self, x_q, x_kv, x_q_lens=None, x_kv_lens=None, ada_ln_aux=None): + """Forward pass of the MultiCrossAttentionHeadVarlen module. + + :param x_q: Query tensor. + :param x_kv: Key and value tensor. + :param x_q_lens: Lengths of the query sequences. + :param x_kv_lens: Lengths of the key and value sequences. + :param ada_ln_aux: Auxiliary data for adaptive layer normalization. + + :return outs: Output tensors. + """ if self.with_residual: x_q_in = x_q x_q = self.lnorm_in_q(x_q) if ada_ln_aux is None else self.lnorm_in_q(x_q, ada_ln_aux) @@ -362,6 +488,14 @@ def forward(self, x_q, x_kv, x_q_lens=None, x_kv_lens=None, ada_ln_aux=None): class MultiCrossAttentionHeadVarlenSlicedQ(torch.nn.Module): + """Multi-head cross-attention with variable length sequences and sliced queries. 
+ + This module implements a memory-efficient variant of multi-head cross-attention where the + query projection is sliced into chunks. This allows processing extremely large query sets + (e.g., global queries against local latents) by computing attention for subsets of queries + sequentially. This approach reduces peak memory usage significantly, enabling the model to + scale to higher resolutions or larger query counts.""" + def __init__( self, dim_embed_q, @@ -379,6 +513,23 @@ def __init__( norm_eps=1e-5, attention_dtype=torch.bfloat16, ): + """Initialize the MultiCrossAttentionHeadVarlenSlicedQ module. + + :param dim_embed_q: Embedding dimension of the query. + :param dim_embed_kv: Embedding dimension of the key and value. + :param num_slices_q: Number of slices for the query. + :param num_heads: Number of attention heads. + :param dim_head_proj: Dimension of the projection head. + :param dropout_rate: Dropout rate. + :param with_residual: Whether to use residual connections. + :param with_qk_lnorm: Whether to use layer normalization for query and key. + :param with_flash: Whether to use flash attention. + :param norm_type: Type of normalization. + :param softcap: Softcap for attention. + :param dim_aux: Dimension of the auxiliary data. + :param norm_eps: Epsilon for normalization. + :param attention_dtype: Data type for attention. + """ super(MultiCrossAttentionHeadVarlenSlicedQ, self).__init__() self.num_slices_q = num_slices_q @@ -428,6 +579,16 @@ def __init__( assert with_flash, "Only flash attention supported at the moment" def forward(self, x_q, x_kv, x_q_lens=None, x_kv_lens=None, ada_ln_aux=None): + """Forward pass of the MultiCrossAttentionHeadVarlenSlicedQ module. + + :param x_q: Query tensor. + :param x_kv: Key and value tensor. + :param x_q_lens: Lengths of the query sequences. + :param x_kv_lens: Lengths of the key and value sequences. + :param ada_ln_aux: Auxiliary data for adaptive layer normalization. + + :return outs: Output tensors. + """ if self.with_residual: x_q_in = x_q x_q = self.lnorm_in_q(x_q) if ada_ln_aux is None else self.lnorm_in_q(x_q, ada_ln_aux) @@ -473,6 +634,8 @@ def forward(self, x_q, x_kv, x_q_lens=None, x_kv_lens=None, ada_ln_aux=None): class MultiSelfAttentionHead(torch.nn.Module): + """Multi-head self-attention.""" + def __init__( self, dim_embed, @@ -488,6 +651,21 @@ def __init__( norm_eps=1e-5, attention_dtype=torch.bfloat16, ): + """Initialize the MultiSelfAttentionHead module. + + :param dim_embed: Embedding dimension. + :param num_heads: Number of attention heads. + :param dim_head_proj: Dimension of the projection head. + :param dropout_rate: Dropout rate. + :param with_residual: Whether to use residual connections. + :param with_qk_lnorm: Whether to use layer normalization for query and key. + :param with_flash: Whether to use flash attention. + :param softcap: Softcap for attention. + :param norm_type: Type of normalization. + :param dim_aux: Dimension of the auxiliary data. + :param norm_eps: Epsilon for normalization. + :param attention_dtype: Data type for attention. + """ super(MultiSelfAttentionHead, self).__init__() self.num_heads = num_heads @@ -528,6 +706,13 @@ def __init__( self.softmax = torch.nn.Softmax(dim=-1) def forward(self, x, ada_ln_aux=None): + """Forward pass of the MultiSelfAttentionHead module. + + :param x: Input tensor. + :param ada_ln_aux: Auxiliary data for adaptive layer normalization. + + :return out: Output tensor. 
+ """ if self.with_residual: x_in = x x = self.lnorm(x) if ada_ln_aux is None else self.lnorm(x, ada_ln_aux) @@ -553,6 +738,8 @@ def forward(self, x, ada_ln_aux=None): class MultiCrossAttentionHead(torch.nn.Module): + """Multi-head cross-attention.""" + def __init__( self, dim_embed_q, @@ -567,6 +754,20 @@ def __init__( norm_eps=1e-5, attention_dtype=torch.bfloat16, ): + """Initialize the MultiCrossAttentionHead module. + + :param dim_embed_q: Embedding dimension of the query. + :param dim_embed_kv: Embedding dimension of the key and value. + :param num_heads: Number of attention heads. + :param dim_head_proj: Dimension of the projection head. + :param dropout_rate: Dropout rate. + :param with_residual: Whether to use residual connections. + :param with_qk_lnorm: Whether to use layer normalization for query and key. + :param with_flash: Whether to use flash attention. + :param norm_type: Type of normalization. + :param norm_eps: Epsilon for normalization. + :param attention_dtype: Data type for attention. + """ super(MultiCrossAttentionHead, self).__init__() self.num_heads = num_heads @@ -607,6 +808,13 @@ def __init__( ######################################### def forward(self, x_q, x_kv): + """Forward pass of the MultiCrossAttentionHead module. + + :param x_q: Query tensor. + :param x_kv: Key and value tensor. + + :return outs: Output tensors. + """ if self.with_residual: x_q_in = x_q x_q, x_kv = self.lnorm_in_q(x_q), self.lnorm_in_kv(x_kv) diff --git a/src/weathergen/model/engines.py b/src/weathergen/model/engines.py index 7359d1403..c4930e291 100644 --- a/src/weathergen/model/engines.py +++ b/src/weathergen/model/engines.py @@ -30,18 +30,19 @@ class EmbeddingEngine(torch.nn.Module): + """Embedding engine for the model.""" + name: "EmbeddingEngine" def __init__(self, cf: Config, sources_size) -> None: - """ - Initialize the EmbeddingEngine with the configuration. + """Initialize the EmbeddingEngine with the configuration. :param cf: Configuration object containing parameters for the engine. - :param sources_size: List of source sizes for each stream. + :param sources_size: Tensor of number of channels for each stream """ super(EmbeddingEngine, self).__init__() self.cf = cf - self.sources_size = sources_size # KCT:iss130, what is this? + self.sources_size = sources_size self.embeds = torch.nn.ModuleList() for i, si in enumerate(self.cf.streams): @@ -81,6 +82,15 @@ def __init__(self, cf: Config, sources_size) -> None: raise ValueError("Unsupported embedding network type") def forward(self, streams_data, pe_embed, dtype, device): + """Forward pass of the embedding engine. + + :param streams_data: Tensor of streams data. + :param pe_embed: Positional encoding embeddings. + :param dtype: Data type for the embeddings. + :param device: Device to run the embeddings on. + + :return tokens_all: Embedded tokens. + """ source_tokens_lens = torch.stack( [ torch.stack( @@ -126,11 +136,20 @@ def forward(self, streams_data, pe_embed, dtype, device): class LocalAssimilationEngine(torch.nn.Module): + """Local assimilation engine for the model. + + The LocalAssimilationEngine is responsible for fusing information from different input + streams (e.g., satellite, station data) within each HEALPix cell. It operates locally, + meaning attention is computed only among tokens belonging to the same cell. This step + aggregates high-resolution, heterogeneous input data into a unified cell-level + representation before global interaction takes place. It uses a sequence of self-attention + blocks and MLPs. 
+ """ + name: "LocalAssimilationEngine" def __init__(self, cf: Config) -> None: - """ - Initialize the LocalAssimilationEngine with the configuration. + """Initialize the LocalAssimilationEngine with the configuration. :param cf: Configuration object containing parameters for the engine. """ @@ -163,17 +182,26 @@ def __init__(self, cf: Config) -> None: ) def forward(self, tokens_c, cell_lens_c, use_reentrant): + """Forward pass of the local assimilation engine. + + :param tokens_c: Tokens to be assimilated. + :param cell_lens_c: Cell lengths for the tokens. + :param use_reentrant: Whether to use reentrant mode. + + :return tokens_c: Assimilated tokens. + """ for block in self.ae_local_blocks: tokens_c = checkpoint(block, tokens_c, cell_lens_c, use_reentrant=use_reentrant) return tokens_c class Local2GlobalAssimilationEngine(torch.nn.Module): + """Local2GlobalAssimilationEngine for the model.""" + name: "Local2GlobalAssimilationEngine" def __init__(self, cf: Config) -> None: - """ - Initialize the Local2GlobalAssimilationEngine with the configuration. + """Initialize the Local2GlobalAssimilationEngine with the configuration. :param cf: Configuration object containing parameters for the engine. """ @@ -225,6 +253,16 @@ def __init__(self, cf: Config) -> None: ) def forward(self, tokens_c, tokens_global_c, q_cells_lens_c, cell_lens_c, use_reentrant): + """Forward pass of the local to global assimilation engine. + + :param tokens_c: Tokens to be assimilated. + :param tokens_global_c: Global tokens to be assimilated. + :param q_cells_lens_c: Query cell lengths for the tokens. + :param cell_lens_c: Cell lengths for the tokens. + :param use_reentrant: Whether to use reentrant mode. + + :return tokens_global_c: Assimilated tokens. + """ for block in self.ae_adapter: tokens_global_c = checkpoint( block, @@ -238,11 +276,20 @@ def forward(self, tokens_c, tokens_global_c, q_cells_lens_c, cell_lens_c, use_re class GlobalAssimilationEngine(torch.nn.Module): + """Global assimilation engine for the model. + + The GlobalAssimilationEngine processes the unified cell-level representations generated by + the LocalAssimilationEngine. Its primary role is to model long-range dependencies and + physical interactions across the entire globe. It alternates between local attention + (focusing on neighboring cells) and global attention (fully connected or sparse global + patterns) to efficiently propagate information. This engine transforms the local latents + into a globally consistent state representation. + """ + name: "GlobalAssimilationEngine" def __init__(self, cf: Config, num_healpix_cells: int) -> None: - """ - Initialize the GlobalAssimilationEngine with the configuration. + """Initialize the GlobalAssimilationEngine with the configuration. :param cf: Configuration object containing parameters for the engine. :param num_healpix_cells: Number of healpix cells used for local queries. @@ -300,17 +347,25 @@ def __init__(self, cf: Config, num_healpix_cells: int) -> None: ) def forward(self, tokens, use_reentrant): + """Forward pass of the global assimilation engine. + + :param tokens: Tokens to be assimilated. + :param use_reentrant: Whether to use reentrant mode. + + :return tokens: Assimilated tokens. 
+ """ for block in self.ae_global_blocks: tokens = checkpoint(block, tokens, use_reentrant=use_reentrant) return tokens class ForecastingEngine(torch.nn.Module): + """Forecasting engine for the model.""" + name: "ForecastingEngine" def __init__(self, cf: Config, num_healpix_cells: int) -> None: - """ - Initialize the ForecastingEngine with the configuration. + """Initialize the ForecastingEngine with the configuration. :param cf: Configuration object containing parameters for the engine. :param num_healpix_cells: Number of healpix cells used for local queries. @@ -368,6 +423,7 @@ def __init__(self, cf: Config, num_healpix_cells: int) -> None: ) def init_weights_final(m): + """Initialize the weights of the forecasting engine.""" if isinstance(m, torch.nn.Linear): torch.nn.init.normal_(m.weight, mean=0, std=0.001) if m.bias is not None: @@ -377,6 +433,13 @@ def init_weights_final(m): block.apply(init_weights_final) def forward(self, tokens, fstep): + """Forward pass of the forecasting engine. + + :param tokens: Tokens to be forecasted. + :param fstep: Forecast step. + + :return tokens: Forecasted tokens. + """ aux_info = torch.tensor([fstep], dtype=torch.float32, device="cuda") for block in self.fe_blocks: tokens = checkpoint(block, tokens, aux_info, use_reentrant=False) @@ -385,6 +448,8 @@ def forward(self, tokens, fstep): class EnsPredictionHead(torch.nn.Module): + """Ensemble prediction head for the model.""" + def __init__( self, dim_embed, @@ -396,7 +461,17 @@ def __init__( hidden_factor=2, final_activation: None | str = None, ): - """Constructor""" + """Initialize the EnsPredictionHead with the configuration. + + :param dim_embed: Dimension of the embedding. + :param dim_out: Dimension of the output. + :param ens_num_layers: Number of layers in the ensemble. + :param ens_size: Size of the ensemble. + :param stream_name: Name of the stream. + :param norm_type: Type of normalization. + :param hidden_factor: Hidden factor to create an internal dimension. + :param final_activation: Optional final activation function. + """ super(EnsPredictionHead, self).__init__() @@ -428,6 +503,12 @@ def __init__( ######################################### def forward(self, toks): + """Forward pass of the EnsPredictionHead. + + :param toks: Tokens to be predicted. + + :return preds: Ensemble predictions. + """ preds = [] for pred_head in self.pred_heads: cpred = toks @@ -440,6 +521,16 @@ def forward(self, toks): class TargetPredictionEngineClassic(nn.Module): + """Target prediction engine for the model. + + The TargetPredictionEngineClassic is a specialized decoding module that projects the global + latent states back to specific target coordinates (e.g., station locations). It typically + employs a PerceiverIO-style architecture where target coordinate embeddings query the + latent state via cross-attention. This engine is "Classic" in the sense that it strictly + follows the original design with coordinate conditioning and optional self-attention, + without the flexible decoder types found in the newer `TargetPredictionEngine`. + """ + def __init__( self, cf, @@ -451,11 +542,10 @@ def __init__( tro_type, stream_name: str, ): - """ - Initialize the TargetPredictionEngine with the configuration. + """Initialize the TargetPredictionEngine with the configuration. :param cf: Configuration object containing parameters for the engine. - :param dims_embed: List of embedding dimensions for each layer. + :param dims_embed: Tensor of embedding dimensions for each layer. 
:param dim_coord_in: Input dimension for coordinates. :param tr_dim_head_proj: Dimension for head projection. :param tr_mlp_hidden_factor: Hidden factor for the MLP layers. @@ -525,6 +615,16 @@ def __init__( ) def forward(self, latent, output, latent_lens, output_lens, coordinates): + """Forward pass of the TargetPredictionEngineClassic. + + :param latent: Latent tokens. + :param output: Output tokens. + :param latent_lens: Lengths of the latent tokens. + :param output_lens: Lengths of the output tokens. + :param coordinates: Target coordinates for auxiliary information. + + :returns tc_tokens: Output tokens. + """ tc_tokens = output tcs_lens = output_lens tokens_stream = latent @@ -548,6 +648,17 @@ def forward(self, latent, output, latent_lens, output_lens, coordinates): class TargetPredictionEngine(nn.Module): + """TargetPredictionEngine for the model. + + The TargetPredictionEngine handles the decoding of the latent representation into the target + observational space. Unlike the Classic version which solely relies on a fixed + PerceiverIO-like structure with coordinate conditioning, this engine is configurable via + `decoder_type`. It supports various conditioning mechanisms, allowing for experimentation + with how the latent state and auxiliary information (like coordinates) are fused to generate + predictions. It includes normalization, optional positional embeddings and a flexible + sequence of decoding blocks. + """ + def __init__( self, cf, @@ -559,11 +670,10 @@ def __init__( tro_type, stream_name: str, ): - """ - Initialize the TargetPredictionEngine with the configuration. + """Initialize the TargetPredictionEngine with the configuration. :param cf: Configuration object containing parameters for the engine. - :param dims_embed: List of embedding dimensions for each layer. + :param dims_embed: Tensor of embedding dimensions for each layer. :param dim_coord_in: Input dimension for coordinates. :param tr_dim_head_proj: Dimension for head projection. :param tr_mlp_hidden_factor: Hidden factor for the MLP layers. @@ -692,6 +802,16 @@ def __init__( ) def forward(self, latent, output, latent_lens, output_lens, coordinates): + """Forward pass of the TargetPredictionEngine. + + :param latent: Latent tokens. + :param output: Output tokens. + :param latent_lens: Lengths of the latent tokens. + :param output_lens: Lengths of the output tokens. + :param coordinates: Target coordinates for auxiliary information. + + :return output: Output tokens. 
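The coordinate-conditioned read-out that both prediction engines build on can be reduced to a few lines: embedded target coordinates act as queries in a cross-attention over the latent tokens. The dimensions below are invented and `torch.nn.MultiheadAttention` stands in for the varlen attention blocks used here:

```python
import torch

dim_embed, dim_coord = 256, 3
coord_embed = torch.nn.Linear(dim_coord, dim_embed)
cross_attn = torch.nn.MultiheadAttention(dim_embed, num_heads=8, batch_first=True)

latent = torch.randn(1, 1024, dim_embed)  # globally assimilated latent tokens
coords = torch.randn(1, 500, dim_coord)   # e.g. (lat, lon, time) per target point
queries = coord_embed(coords)             # one query per requested target
decoded, _ = cross_attn(queries, latent, latent)  # (1, 500, dim_embed)
```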
+ """ latent = ( self.dropout(self.latent_in_norm(latent + self.pos_embed)) if self.cf.decoder_type != "PerceiverIOCoordConditioning" diff --git a/src/weathergen/utils/validation_io.py b/src/weathergen/utils/validation_io.py index 355be0e51..f8a5a1cc5 100644 --- a/src/weathergen/utils/validation_io.py +++ b/src/weathergen/utils/validation_io.py @@ -30,7 +30,7 @@ def write_output( sample_idxs, ): stream_names = [stream.name for stream in cf.streams] - analysis_streams_output = cf.get( 'analysis_streams_output', None) + analysis_streams_output = cf.get("analysis_streams_output", None) if cf.streams_output is not None: output_stream_names = cf.streams_output elif analysis_streams_output is not None: # --- to be removed at some point --- From 49424b02737bc265c68bbc39dc5332ed5f8a8a9e Mon Sep 17 00:00:00 2001 From: Marieke Gertrud Karla Wesselkamp Date: Tue, 27 Jan 2026 13:55:06 +0100 Subject: [PATCH 15/24] implemented _get() in datareader --- config/lst_config.yml | 4 +- config/streams/seviri_lst/seviri_lst.yml | 14 +-- src/weathergen/datasets/data_reader_seviri.py | 98 ++++++++++++------- 3 files changed, 70 insertions(+), 46 deletions(-) diff --git a/config/lst_config.yml b/config/lst_config.yml index 21d64301e..4aa16ab0a 100644 --- a/config/lst_config.yml +++ b/config/lst_config.yml @@ -146,8 +146,8 @@ log_grad_norms: False start_date: 197901010000 end_date: 202012310000 -start_date_val: 202101010000 -end_date_val: 202201010000 +start_date_val: 201705010000 #202101010000 +end_date_val: 20170630000 #202201010000 len_hrs: 6 step_hrs: 6 input_window_steps: 1 diff --git a/config/streams/seviri_lst/seviri_lst.yml b/config/streams/seviri_lst/seviri_lst.yml index b3f6aeec4..b18207d36 100644 --- a/config/streams/seviri_lst/seviri_lst.yml +++ b/config/streams/seviri_lst/seviri_lst.yml @@ -10,15 +10,15 @@ SEVIRI_LST : type : msg_lst - filenames : ['mpg_seviri_2017_lst_v0'] - data_start_time : "2017-01-01 00:00" - data_end_time : "2017-12-31 00:00" - source_exclude: [] #["LANDCOV", "quality_flag", "LST", "FVC", "LW_MASK"] - target_exclude: ["LANDCOV", "quality_flag", "DEM", "FVC", "LW_MASK"] + filenames : ['lst_test.zarr'] + data_start_time : "2017-05-01 00:00" + data_end_time : "2017-06-30 00:00" + #source_exclude: ["LANDCOV", "quality_flag", "DEM", "FVC", "LW_MASK", "_indices", "AL-NI-DH", "standard_error", "LST"] #["LANDCOV", "quality_flag", "LST", "FVC", "LW_MASK"] + #target_exclude: ["LANDCOV", "quality_flag", "DEM", "FVC", "LW_MASK", "_indices", "AL-NI-DH", "standard_error"] target: ["LST"] - source: [] + #source: [] geoinfos: [] #["DEM"] # "LANDCOV" - metadata: "/p/scratch/weatherai/shared/weather_generator_data/mpg_seviri_2017_lst_v0/metadata" + metadata: "/leonardo_work/AIFAC_5C0_154/weathergen/data/mpg_seviri_l2_2017-18_v0/metadata_lst_test" experiment: "johannesburg" loss_weight : 1.0 masking_rate : 0.6 diff --git a/src/weathergen/datasets/data_reader_seviri.py b/src/weathergen/datasets/data_reader_seviri.py index 7eca412bd..b0b7a1cd8 100644 --- a/src/weathergen/datasets/data_reader_seviri.py +++ b/src/weathergen/datasets/data_reader_seviri.py @@ -48,15 +48,14 @@ def __init__( np32 = np.float32 # open the dataset the way we want it - time_ds = xr.open_zarr(filename, group= "era5") + #time_ds = xr.open_zarr(filename, group= "era5") ds = xr.open_zarr(filename, group= "seviri") - - #code.interact(local=locals()) - #pdb.breakpoint() - print("Max time: ", time_ds.time.max().values) + ds["time"] = ds["time"].astype("datetime64[ns]") + ds = ds.sel(time=slice(stream_info["data_start_time"], 
stream_info["data_end_time"])) + print("Selected time period: ", ds.time.min().values, " to ", ds.time.max().values) # check if the data overlaps with the time window, otherwise initialises as empty datareader - if tw_handler.t_start >= time_ds.time.max() or tw_handler.t_end <= time_ds.time.min(): + if tw_handler.t_start >= ds.time.max() or tw_handler.t_end <= ds.time.min(): name = stream_info["name"] _logger.warning(f"{name} is not supported over data loader window. Stream is skipped.") super().__init__(tw_handler, stream_info) @@ -66,14 +65,9 @@ def __init__( if "frequency" in stream_info: assert False, "Frequency sub-sampling currently not supported" - # checks length of time in dataset - idx_start = 0 - idx_end = 120 # len(time_ds.time) - 1 - data_start_time = time_ds.time[idx_start].values - data_end_time = time_ds.time[idx_end].values - - period = (data_end_time - data_start_time) - print("Data period: ", period) + period = (ds.time[1] - ds.time[0]).values + data_start_time = ds.time[0].values + data_end_time = ds.time[-1].values assert data_start_time is not None and data_end_time is not None, ( data_start_time, @@ -95,18 +89,23 @@ def __init__( return else: self.ds = ds - self.len = idx_end - idx_start #len(ds) + self.len = len(self.ds['time']) # len(ds), this returns the number of variables, not time steps self.exclude = {"LWMASK", "_indices", "quality_flag"} # exclude these from channels because we don't have a statistics for them self.channels_file = [k for k in self.ds.keys()] # caches lats and lons # if you want a spatial subset, do it here + index_path = Path(self.stream_info["metadata"]) / self.stream_info["experiment"] / "seviri_indices.parquet" + self.spatial_indices = pd.read_parquet(index_path) + ds = ds.isel(latitude=self.spatial_indices["lat_idx"], longitude=self.spatial_indices["lon_idx"]) + lat_name = stream_info.get("latitude_name", "latitude") self.latitudes = _clip_lat(np.array(ds[lat_name], dtype=np32)) lon_name = stream_info.get("longitude_name", "longitude") self.longitudes = _clip_lon(np.array(ds[lon_name], dtype=np32)) + # self.geoinfo_channels = stream_info.get("geoinfos", []) self.geoinfo_idx = [self.channels_file.index(ch) for ch in self.geoinfo_channels] @@ -119,10 +118,13 @@ def __init__( self.target_idx, self.target_channels = self.select_channels(ds, "target") #self.target_channels = [self.channels_file[i] for i in self.target_idx] - self.source_idx, self.source_channels = self.select_channels(ds, "source") - #self.source_channels = [self.channels_file[i] for i in self.source_idx] + self.source_channels = stream_info.get("source", []) + #self.source_idx, self.source_channels = self.select_channels(ds, "source") + self.source_idx = [self.channels_file.index(ch) for ch in self.source_channels] #print("Source channels:", self.source_channels) + #code.interact(local=locals()) + ds_name = stream_info["name"] _logger.info(f"{ds_name}: target channels: {self.target_channels}") @@ -187,7 +189,7 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: (t_idxs, dtr) = self._get_dataset_idxs(idx) - if self.ds is None or self.len == 0 or len(t_idxs) == 0: + if self.ds is None or self.len == 0 or len(t_idxs) == 0 or len(channels_idx) == 0: return ReaderData.empty( num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) ) @@ -201,29 +203,51 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: # ds is a wrapper around zarr with get_coordinate_selection not being exposed since # subsetting is pushed to the ctor via 
frequency argument; this also ensures that no sub- # sampling is required here - sel_channels = [self.channels_file[i] for i in channels_idx] - data = self.ds[sel_channels].isel(time=slice(didx_start, didx_end)).to_array().values + + #print("channels_idx to _get:", channels_idx) + #sel_channels = [self.channels_file[i] for i in channels_idx] + #print("Selected channels:", sel_channels) + + sel_channels = "LST" + data = self.ds[sel_channels].isel(time=slice(didx_start, didx_end), + latitude=self.spatial_indices["lat_idx"], + longitude=self.spatial_indices["lon_idx"]).values + + print("Data shape after channel selection:", data.shape) # (time, lat, lon) (6, 1474, 1474) + + n_times = data.shape[0] + n_lats = data.shape[1] + n_lons = data.shape[2] + n_spatial = n_lats * n_lons + # flatten along time dimension - data = data.transpose([1, 2, 0]).reshape((data.shape[1] * data.shape[2], data.shape[0])) + data = data.transpose([1, 2, 0]).reshape((n_spatial * n_times, -1)) + + # print("Data shape after flattening time:", data.shape) # (2172676, 6) # set invalid values to NaN - mask = data == self.fillvalue - data[mask] = np.nan + #mask = data == self.fillvalue + #data[mask] = np.nan # construct lat/lon coords - latlon = np.concatenate( - [ - np.expand_dims(self.latitudes, 0), - np.expand_dims(self.longitudes, 0), - ], - axis=0, - ).transpose() - - # repeat len(t_idxs) times - coords = np.vstack((latlon,) * len(t_idxs)) - geoinfos = np.vstack((self.geoinfo_data,) * len(t_idxs)) - - # date time matching #data points of data - datetimes = np.repeat(self.ds.time[didx_start:didx_end].values, len(data) // len(t_idxs)) + lat2d, lon2d = np.meshgrid( + self.latitudes, + self.longitudes, + indexing="ij", + ) + lat_flat = lat2d.reshape(-1) # (2172676,) + lon_flat = lon2d.reshape(-1) # (2172676,) + + # Tile spatial coordinates for each timestep + coords = np.tile(np.column_stack((lat_flat, lon_flat)), (n_times, 1)) + + + datetimes = np.repeat( + self.ds.time[didx_start:didx_end].values, + n_spatial + ) + + # Empty Geoinfos + geoinfos = np.zeros((n_spatial * n_times, 0), dtype=np.float32) rd = ReaderData( coords=coords, From 31334bc543cdafa94d638dc3e62425815f972488 Mon Sep 17 00:00:00 2001 From: Marieke Gertrud Karla Wesselkamp Date: Wed, 28 Jan 2026 15:43:25 +0100 Subject: [PATCH 16/24] enable geoinfo loading through lazy zarr access --- config/streams/seviri_lst/seviri_lst.yml | 6 +- src/weathergen/datasets/data_reader_seviri.py | 216 ++++++++++-------- 2 files changed, 124 insertions(+), 98 deletions(-) diff --git a/config/streams/seviri_lst/seviri_lst.yml b/config/streams/seviri_lst/seviri_lst.yml index b18207d36..c345be779 100644 --- a/config/streams/seviri_lst/seviri_lst.yml +++ b/config/streams/seviri_lst/seviri_lst.yml @@ -13,11 +13,9 @@ SEVIRI_LST : filenames : ['lst_test.zarr'] data_start_time : "2017-05-01 00:00" data_end_time : "2017-06-30 00:00" - #source_exclude: ["LANDCOV", "quality_flag", "DEM", "FVC", "LW_MASK", "_indices", "AL-NI-DH", "standard_error", "LST"] #["LANDCOV", "quality_flag", "LST", "FVC", "LW_MASK"] - #target_exclude: ["LANDCOV", "quality_flag", "DEM", "FVC", "LW_MASK", "_indices", "AL-NI-DH", "standard_error"] target: ["LST"] - #source: [] - geoinfos: [] #["DEM"] # "LANDCOV" + source: ["LST"] + geoinfos: ["DEM"] #, "LANDCOV"] metadata: "/leonardo_work/AIFAC_5C0_154/weathergen/data/mpg_seviri_l2_2017-18_v0/metadata_lst_test" experiment: "johannesburg" loss_weight : 1.0 diff --git a/src/weathergen/datasets/data_reader_seviri.py b/src/weathergen/datasets/data_reader_seviri.py index 
b0b7a1cd8..b6b308419 100644 --- a/src/weathergen/datasets/data_reader_seviri.py +++ b/src/weathergen/datasets/data_reader_seviri.py @@ -14,6 +14,7 @@ import numpy as np import xarray as xr from numpy.typing import NDArray +import zarr # for interactive debugging import code @@ -33,7 +34,6 @@ _logger = logging.getLogger(__name__) - class DataReaderSeviri(DataReaderTimestep): """Data reader for SEVIRI satellite data.""" @@ -45,17 +45,54 @@ def __init__( ) -> None: """Initialize the SEVIRI data reader.""" + + self.fillvalue = np.nan np32 = np.float32 - # open the dataset the way we want it - #time_ds = xr.open_zarr(filename, group= "era5") - ds = xr.open_zarr(filename, group= "seviri") - ds["time"] = ds["time"].astype("datetime64[ns]") - ds = ds.sel(time=slice(stream_info["data_start_time"], stream_info["data_end_time"])) - print("Selected time period: ", ds.time.min().values, " to ", ds.time.max().values) + # set sampling parameters + self.stride_temporal = 6 # downsample to six hourly timesteps + self.stride_spatial = 8 # use every 8th point to reduce memory usage on workers + + index_path = Path(stream_info["metadata"]) / stream_info["experiment"] / "seviri_indices.parquet" + self.spatial_indices = pd.read_parquet(index_path) + + self._zarr_path = filename + self._ds = None # opened lazily + + # Open temporarily with xarray just for init metadata (time handling is easier) + ds_xr = xr.open_zarr(filename, group="seviri") + ds_xr["time"] = ds_xr["time"].astype("datetime64[ns]") + ds_xr = ds_xr.sel(time=slice(stream_info["data_start_time"], stream_info["data_end_time"])) + print("Selected time period: ", ds_xr.time.min().values, " to ", ds_xr.time.max().values) + + # Apply spatial subset + ds_xr = ds_xr.isel( + latitude=self.spatial_indices["lat_idx"][::self.stride_spatial], + longitude=self.spatial_indices["lon_idx"][::self.stride_spatial] + ) + + # Cache time values as numpy (avoid zarr access for time later) + self._time_values = np.array(ds_xr.time.values) + + # Cache spatial indices for zarr access + self._lat_idx = np.array(self.spatial_indices["lat_idx"][::self.stride_spatial]) + self._lon_idx = np.array(self.spatial_indices["lon_idx"][::self.stride_spatial]) + + # Find time indices in the full zarr that correspond to our time selection + ds_full = xr.open_zarr(filename, group="seviri") + ds_full["time"] = ds_full["time"].astype("datetime64[ns]") + full_times = ds_full.time.values + start_time = ds_xr.time.min().values + self._time_offset = int(np.searchsorted(full_times, start_time)) + + # caches lats and lons + lat_name = stream_info.get("latitude_name", "latitude") + self.latitudes = _clip_lat(np.array(ds_xr[lat_name], dtype=np32)) + lon_name = stream_info.get("longitude_name", "longitude") + self.longitudes = _clip_lon(np.array(ds_xr[lon_name], dtype=np32)) # check if the data overlaps with the time window, otherwise initialises as empty datareader - if tw_handler.t_start >= ds.time.max() or tw_handler.t_end <= ds.time.min(): + if tw_handler.t_start >= ds_xr.time.max() or tw_handler.t_end <= ds_xr.time.min(): name = stream_info["name"] _logger.warning(f"{name} is not supported over data loader window. 
Stream is skipped.") super().__init__(tw_handler, stream_info) @@ -65,9 +102,10 @@ def __init__( if "frequency" in stream_info: assert False, "Frequency sub-sampling currently not supported" - period = (ds.time[1] - ds.time[0]).values - data_start_time = ds.time[0].values - data_end_time = ds.time[-1].values + period = np.timedelta64(self.stride_temporal, "h") + + data_start_time = ds_xr.time[0].values + data_end_time = ds_xr.time[-1].values assert data_start_time is not None and data_end_time is not None, ( data_start_time, @@ -88,56 +126,56 @@ def __init__( self.init_empty() return else: - self.ds = ds - self.len = len(self.ds['time']) # len(ds), this returns the number of variables, not time steps - - self.exclude = {"LWMASK", "_indices", "quality_flag"} # exclude these from channels because we don't have a statistics for them - self.channels_file = [k for k in self.ds.keys()] - - # caches lats and lons - # if you want a spatial subset, do it here - index_path = Path(self.stream_info["metadata"]) / self.stream_info["experiment"] / "seviri_indices.parquet" - self.spatial_indices = pd.read_parquet(index_path) - ds = ds.isel(latitude=self.spatial_indices["lat_idx"], longitude=self.spatial_indices["lon_idx"]) - - lat_name = stream_info.get("latitude_name", "latitude") - self.latitudes = _clip_lat(np.array(ds[lat_name], dtype=np32)) - lon_name = stream_info.get("longitude_name", "longitude") - self.longitudes = _clip_lon(np.array(ds[lon_name], dtype=np32)) + self.len = len(ds_xr['time']) // self.stride_temporal - # + self.exclude = {"LWMASK", "_indices", "quality_flag"} + self.channels_file = [k for k in ds_xr.keys()] self.geoinfo_channels = stream_info.get("geoinfos", []) self.geoinfo_idx = [self.channels_file.index(ch) for ch in self.geoinfo_channels] + # cache geoinfos - #self.geoinfo_data = np.stack([np.array(ds[ch], dtype=np32) for ch in self.geoinfo_channels]) - #self.geoinfo_data = self.geoinfo_data.transpose() + if len(self.geoinfo_channels) != 0: + self.geoinfo_data = np.stack([np.array(ds_xr[ch], dtype=np32) for ch in self.geoinfo_channels]) + self._geoinfo_flat = self.geoinfo_data.transpose([1, 2, 0]).reshape( + (-1, len(self.geoinfo_channels)) + ) # select/filter requested target channels - # this will access the stream info, hence make sure to set it. - self.target_idx, self.target_channels = self.select_channels(ds, "target") - #self.target_channels = [self.channels_file[i] for i in self.target_idx] + self.target_idx, self.target_channels = self.select_channels(ds_xr, "target") self.source_channels = stream_info.get("source", []) - #self.source_idx, self.source_channels = self.select_channels(ds, "source") self.source_idx = [self.channels_file.index(ch) for ch in self.source_channels] - #print("Source channels:", self.source_channels) - - #code.interact(local=locals()) ds_name = stream_info["name"] _logger.info(f"{ds_name}: target channels: {self.target_channels}") - # what is this doing? 
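The lazy-open pattern this patch introduces (see the `ds` property below) keeps the zarr store closed until first access, so each DataLoader worker opens its own handle rather than inheriting a pickled one from the parent process. In isolation, with placeholder path and group names, it looks like:

```python
import zarr


class LazyZarrHandle:
    def __init__(self, path, group="seviri"):
        self._path, self._group = path, group
        self._ds = None  # not opened in the parent process

    @property
    def ds(self):
        if self._ds is None:  # first touch, e.g. inside a worker process
            self._ds = zarr.open(self._path, mode="r")[self._group]
        return self._ds
```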
self.properties = { "stream_id": 0, } - # or your function to load or compute the statistics self.mean, self.stdev = self._create_statistics() - self.mean_geoinfo, self.stdev_geoinfo = self.mean[self.geoinfo_idx], self.stdev[self.geoinfo_idx] + # Close xarray, force lazy zarr open in workers + ds_xr.close() + ds_full.close() + self._ds = None + + def _open_ds(self): + store = zarr.open(self._zarr_path, mode='r') + return store['seviri'] + + @property + def ds(self): + if self._ds is None: + self._ds = self._open_ds() + return self._ds + + @ds.setter + def ds(self, value): + self._ds = value + def _create_statistics(self): statistics = Path(self.stream_info["metadata"]) / self.stream_info["experiment"] / "seviri_statistics.parquet" df_stats = pd.read_parquet(statistics) @@ -148,7 +186,7 @@ def _create_statistics(self): for ch in self.channels_file: if ch in self.exclude: - mean.append(0.0) # placeholder for those we don't have statistics for + mean.append(0.0) stdev.append(1.0) else: mean.append(mean_lookup[ch].astype(np.float32)) @@ -165,7 +203,7 @@ def _create_statistics(self): @override def init_empty(self) -> None: super().init_empty() - self.ds = None + self._ds = None self.len = 0 @override @@ -176,44 +214,40 @@ def length(self) -> int: def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: """ Get data for window (for either source or target, through public interface) - Parameters - ---------- - idx : int - Index of temporal window - channels_idx : list[int] - Selection of channels - Returns - ------- - ReaderData providing coords, geoinfos, data, datetimes """ (t_idxs, dtr) = self._get_dataset_idxs(idx) - if self.ds is None or self.len == 0 or len(t_idxs) == 0 or len(channels_idx) == 0: + if self._ds is None and self.len == 0: + return ReaderData.empty( + num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) + ) + + if len(t_idxs) == 0 or len(channels_idx) == 0: return ReaderData.empty( num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) ) assert t_idxs[0] >= 0, "index must be non-negative" - didx_start = t_idxs[0] - # End is inclusive - didx_end = t_idxs[-1] + 1 - - # extract number of time steps and collapse ensemble dimension - # ds is a wrapper around zarr with get_coordinate_selection not being exposed since - # subsetting is pushed to the ctor via frequency argument; this also ensures that no sub- - # sampling is required here - - #print("channels_idx to _get:", channels_idx) - #sel_channels = [self.channels_file[i] for i in channels_idx] - #print("Selected channels:", sel_channels) - sel_channels = "LST" - data = self.ds[sel_channels].isel(time=slice(didx_start, didx_end), - latitude=self.spatial_indices["lat_idx"], - longitude=self.spatial_indices["lon_idx"]).values + # Convert to actual zarr indices (accounting for time offset and stride) + didx_start = self._time_offset + t_idxs[0] * self.stride_temporal + didx_end = self._time_offset + t_idxs[-1] * self.stride_temporal + 1 - print("Data shape after channel selection:", data.shape) # (time, lat, lon) (6, 1474, 1474) + sel_channels = [self.channels_file[i] for i in channels_idx] + + # Access zarr directly with numpy advanced indexing + data_list = [] + for ch in sel_channels: + # zarr array: shape is (time, lat, lon) + ch_data = self.ds[ch][ + didx_start:didx_end:self.stride_temporal, + self._lat_idx, + : + ][:, :, self._lon_idx] + data_list.append(ch_data) + + data = np.stack(data_list, axis=-1) # shape: (n_times, n_lats, n_lons, n_channels) n_times = data.shape[0] n_lats 
= data.shape[1] @@ -221,12 +255,13 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: n_spatial = n_lats * n_lons # flatten along time dimension - data = data.transpose([1, 2, 0]).reshape((n_spatial * n_times, -1)) + data = data.reshape((n_times * n_spatial, len(channels_idx))) - # print("Data shape after flattening time:", data.shape) # (2172676, 6) - # set invalid values to NaN - #mask = data == self.fillvalue - #data[mask] = np.nan + # prepare geoinfos + if len(self.geoinfo_channels) != 0: + geoinfos = np.tile(self._geoinfo_flat, (n_times, 1)) + else: + geoinfos = np.zeros((n_spatial * n_times, 0), dtype=np.float32) # construct lat/lon coords lat2d, lon2d = np.meshgrid( @@ -234,21 +269,23 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: self.longitudes, indexing="ij", ) - lat_flat = lat2d.reshape(-1) # (2172676,) - lon_flat = lon2d.reshape(-1) # (2172676,) + lat_flat = lat2d.reshape(-1) + lon_flat = lon2d.reshape(-1) # Tile spatial coordinates for each timestep coords = np.tile(np.column_stack((lat_flat, lon_flat)), (n_times, 1)) - + # Use cached time values + time_indices = slice( + t_idxs[0] * self.stride_temporal, + t_idxs[-1] * self.stride_temporal + 1, + self.stride_temporal + ) datetimes = np.repeat( - self.ds.time[didx_start:didx_end].values, + self._time_values[time_indices], n_spatial ) - # Empty Geoinfos - geoinfos = np.zeros((n_spatial * n_times, 0), dtype=np.float32) - rd = ReaderData( coords=coords, geoinfos=geoinfos, @@ -256,23 +293,19 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: datetimes=datetimes, ) check_reader_data(rd, dtr) - + return rd def select_channels(self, ds, ch_type: str) -> NDArray[np.int64]: - """Select channels based on stream info for either source or target.""" channels = self.stream_info.get(ch_type) assert channels is not None, f"{ch_type} channels need to be specified" - # sanity check + is_empty = len(channels) == 0 if channels is not None else False if is_empty: stream_name = self.stream_info["name"] _logger.warning(f"No channel for {stream_name} for {ch_type}.") - - if is_empty: - _logger.warning(f"No channel selected for {stream_name} for {ch_type}.") chs_idx = np.empty(shape=[0], dtype=int) channels = [] else: @@ -283,15 +316,10 @@ def select_channels(self, ds, ch_type: str) -> NDArray[np.int64]: def _clip_lat(lats: NDArray) -> NDArray[np.float32]: - """ - Clip latitudes to the range [-90, 90] and ensure periodicity. - """ + """Clip latitudes to the range [-90, 90] and ensure periodicity.""" return (2 * np.clip(lats, -90.0, 90.0) - lats).astype(np.float32) -# TODO: move to base class def _clip_lon(lons: NDArray) -> NDArray[np.float32]: - """ - Clip longitudes to the range [-180, 180] and ensure periodicity. 
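The coordinate construction in `_get` above follows a standard recipe: gridded `(time, lat, lon)` values are flattened time-major into one row per point, with coordinates tiled and timestamps repeated to match. A toy version with invented values:

```python
import numpy as np

times = np.array(["2017-05-01T00", "2017-05-01T06"], dtype="datetime64[ns]")
lats = np.array([10.0, 20.0], dtype=np.float32)
lons = np.array([0.0, 5.0, 10.0], dtype=np.float32)
data = np.arange(2 * 2 * 3, dtype=np.float32).reshape(2, 2, 3)  # (time, lat, lon)

# one (lat, lon) row per grid point, tiled once per timestep
lat2d, lon2d = np.meshgrid(lats, lons, indexing="ij")
coords = np.tile(np.column_stack((lat2d.ravel(), lon2d.ravel())), (len(times), 1))

flat = data.reshape(len(times) * lats.size * lons.size, 1)  # time-major, one channel
datetimes = np.repeat(times, lats.size * lons.size)         # one timestamp per row

assert coords.shape == (12, 2) and flat.shape == (12, 1) and datetimes.shape == (12,)
```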
- """ + """Clip longitudes to the range [-180, 180] and ensure periodicity.""" return ((lons + 180.0) % 360.0 - 180.0).astype(np.float32) \ No newline at end of file From c34c42e6dace12a0500582e7d391f24248400f3d Mon Sep 17 00:00:00 2001 From: Marieke Gertrud Karla Wesselkamp Date: Mon, 2 Feb 2026 10:22:59 +0100 Subject: [PATCH 17/24] stage before moving from xarray to zarr --- src/weathergen/datasets/data_reader_seviri.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/weathergen/datasets/data_reader_seviri.py b/src/weathergen/datasets/data_reader_seviri.py index b6b308419..e526f59f6 100644 --- a/src/weathergen/datasets/data_reader_seviri.py +++ b/src/weathergen/datasets/data_reader_seviri.py @@ -22,6 +22,8 @@ # for reading the parquet files import pandas as pd +import os +os.environ['ZARR_V3_EXPERIMENTAL_API'] = '1' from weathergen.datasets.data_reader_base import ( DataReaderTimestep, @@ -33,7 +35,6 @@ _logger = logging.getLogger(__name__) - class DataReaderSeviri(DataReaderTimestep): """Data reader for SEVIRI satellite data.""" @@ -53,8 +54,8 @@ def __init__( self.stride_temporal = 6 # downsample to six hourly timesteps self.stride_spatial = 8 # use every 8th point to reduce memory usage on workers - index_path = Path(stream_info["metadata"]) / stream_info["experiment"] / "seviri_indices.parquet" - self.spatial_indices = pd.read_parquet(index_path) + index_path = Path(stream_info["metadata"]) + self.spatial_indices = np.load(index_path)["seviri_indices"] self._zarr_path = filename self._ds = None # opened lazily From 2a1e1dd43beac60ebbc640b943325034012b7365 Mon Sep 17 00:00:00 2001 From: Marieke Gertrud Karla Wesselkamp Date: Mon, 2 Feb 2026 15:06:06 +0100 Subject: [PATCH 18/24] updated statistics to npz load --- src/weathergen/datasets/data_reader_seviri.py | 62 ++++++++++++++----- 1 file changed, 45 insertions(+), 17 deletions(-) diff --git a/src/weathergen/datasets/data_reader_seviri.py b/src/weathergen/datasets/data_reader_seviri.py index e526f59f6..f4eee55ec 100644 --- a/src/weathergen/datasets/data_reader_seviri.py +++ b/src/weathergen/datasets/data_reader_seviri.py @@ -20,8 +20,6 @@ import code import pdb -# for reading the parquet files -import pandas as pd import os os.environ['ZARR_V3_EXPERIMENTAL_API'] = '1' @@ -54,7 +52,7 @@ def __init__( self.stride_temporal = 6 # downsample to six hourly timesteps self.stride_spatial = 8 # use every 8th point to reduce memory usage on workers - index_path = Path(stream_info["metadata"]) + index_path = Path(stream_info["metadata"]) / "train_scene_000.npz" self.spatial_indices = np.load(index_path)["seviri_indices"] self._zarr_path = filename @@ -66,19 +64,25 @@ def __init__( ds_xr = ds_xr.sel(time=slice(stream_info["data_start_time"], stream_info["data_end_time"])) print("Selected time period: ", ds_xr.time.min().values, " to ", ds_xr.time.max().values) + col_extent = ds_xr['longitude'].shape[0] + lat_idx = self.spatial_indices // col_extent + lon_idx = self.spatial_indices % col_extent + + # Cache spatial indices for zarr access + self._lat_idx = np.array(lat_idx[::self.stride_spatial]) + self._lon_idx = np.array(lon_idx[::self.stride_spatial]) + + #code.interact(local=locals()) + # Apply spatial subset ds_xr = ds_xr.isel( - latitude=self.spatial_indices["lat_idx"][::self.stride_spatial], - longitude=self.spatial_indices["lon_idx"][::self.stride_spatial] + latitude=self._lat_idx, + longitude=self._lon_idx ) # Cache time values as numpy (avoid zarr access for time later) self._time_values = 
np.array(ds_xr.time.values) - # Cache spatial indices for zarr access - self._lat_idx = np.array(self.spatial_indices["lat_idx"][::self.stride_spatial]) - self._lon_idx = np.array(self.spatial_indices["lon_idx"][::self.stride_spatial]) - # Find time indices in the full zarr that correspond to our time selection ds_full = xr.open_zarr(filename, group="seviri") ds_full["time"] = ds_full["time"].astype("datetime64[ns]") @@ -129,7 +133,7 @@ def __init__( else: self.len = len(ds_xr['time']) // self.stride_temporal - self.exclude = {"LWMASK", "_indices", "quality_flag"} + self.exclude = {"LWMASK", "LANDCOV", "_indices", "quality_flag"} self.channels_file = [k for k in ds_xr.keys()] self.geoinfo_channels = stream_info.get("geoinfos", []) @@ -178,10 +182,11 @@ def ds(self, value): self._ds = value def _create_statistics(self): - statistics = Path(self.stream_info["metadata"]) / self.stream_info["experiment"] / "seviri_statistics.parquet" - df_stats = pd.read_parquet(statistics) - mean_lookup = df_stats.set_index('variable')["mean"] - std_lookup = df_stats.set_index('variable')["std"] + statistics = Path(self.stream_info["metadata"]) / "statistics_global.npz" + df_stats = _assemble_statistics_from_npz(statistics) + + #mean_lookup = df_stats.set_index('variable')["mean"] + #std_lookup = df_stats.set_index('variable')["std"] mean, stdev = [], [] @@ -190,8 +195,8 @@ def _create_statistics(self): mean.append(0.0) stdev.append(1.0) else: - mean.append(mean_lookup[ch].astype(np.float32)) - stdev.append(std_lookup[ch].astype(np.float32)) + mean.append(df_stats[ch]["mean"]) + stdev.append(df_stats[ch]["std"]) mean = np.array(mean) stdev = np.array(stdev) @@ -323,4 +328,27 @@ def _clip_lat(lats: NDArray) -> NDArray[np.float32]: def _clip_lon(lons: NDArray) -> NDArray[np.float32]: """Clip longitudes to the range [-180, 180] and ensure periodicity.""" - return ((lons + 180.0) % 360.0 - 180.0).astype(np.float32) \ No newline at end of file + return ((lons + 180.0) % 360.0 - 180.0).astype(np.float32) + +def _assemble_statistics_from_npz(src: str | Path ) -> dict[str, dict[str, float]]: + """ + Loads statistics saved with `save_statistics_npz`. 
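For reference, a round-trip for the file layout this helper expects: a `variables` array plus one equally long array per statistic. The file name matches the patch; the values are invented:

```python
import numpy as np

np.savez(
    "statistics_global.npz",
    variables=np.array(["LST", "DEM"]),
    mean=np.array([285.3, 412.0]),
    std=np.array([12.7, 350.5]),
)

with np.load("statistics_global.npz", allow_pickle=True) as z:
    stats = {
        str(var): {k: float(z[k][i]) for k in z.files if k != "variables"}
        for i, var in enumerate(z["variables"])
    }

assert stats["LST"]["mean"] == 285.3 and stats["DEM"]["std"] == 350.5
```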
+ Returns: + dict[var_name, dict[stat_name, value]] + """ + out: dict[str, dict[str, float]] = {} + + # If it's path-like, normalize to Path; otherwise assume it's file-like + if isinstance(src, (str, os.PathLike)): + src = Path(src) + + with np.load(src, allow_pickle=True) as z: + variables = list(z['variables']) + stat_names = [k for k in z.files if k != 'variables'] + + for i, var in enumerate(variables): + out[str(var)] = {} + for stat in stat_names: + out[str(var)][stat] = np.asarray(z[stat][i]).item() + + return out \ No newline at end of file From a7e134997770bdc241063b0751022d232cef9a82 Mon Sep 17 00:00:00 2001 From: Marieke Gertrud Karla Wesselkamp Date: Mon, 2 Feb 2026 15:06:17 +0100 Subject: [PATCH 19/24] adjusted path --- src/weathergen/datasets/multi_stream_data_sampler.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/weathergen/datasets/multi_stream_data_sampler.py b/src/weathergen/datasets/multi_stream_data_sampler.py index db76066b6..429527fbd 100644 --- a/src/weathergen/datasets/multi_stream_data_sampler.py +++ b/src/weathergen/datasets/multi_stream_data_sampler.py @@ -64,6 +64,7 @@ def collect_datasources(stream_datasets: list, idx: int, type: str) -> IOReaderD rdatas = [] for ds in stream_datasets: + #code.interact(local=locals()) if type == "source": get_reader_data = ds.get_source normalize_channels = ds.normalize_source_channels @@ -151,7 +152,7 @@ def __init__( datapath = cf.data_path_fesom case "msg_lst": dataset = DataReaderSeviri - datapath = cf.data_path_obs + datapath = cf.data_path_anemoi # on leonardo. case type_name: reader_entry = get_extra_reader(type_name, cf) if reader_entry is not None: From fc74b70d734387187a42b6f084b0df61c5825068 Mon Sep 17 00:00:00 2001 From: Marieke Gertrud Karla Wesselkamp Date: Wed, 4 Feb 2026 09:54:27 +0100 Subject: [PATCH 20/24] set up evaluation --- config/evaluate/eval_config_lst.yml | 41 +++++++++++++++++++ config/streams/seviri_lst/seviri_lst.yml | 11 ++--- src/weathergen/datasets/data_reader_seviri.py | 4 +- 3 files changed, 49 insertions(+), 7 deletions(-) create mode 100644 config/evaluate/eval_config_lst.yml diff --git a/config/evaluate/eval_config_lst.yml b/config/evaluate/eval_config_lst.yml new file mode 100644 index 000000000..4e958f640 --- /dev/null +++ b/config/evaluate/eval_config_lst.yml @@ -0,0 +1,41 @@ +image_format : "png" #options: "png", "pdf", "svg", "eps", "jpg" .. +dpi_val : 300 +summary_plots : true +print_summary: true + +evaluation: + metrics : ["rmse", "mae"] + regions: ["global", "nhem"] + summary_dir: "./plots/" + plot_score_maps: false #plot scores on a 2D maps. it slows down score computation + print_summary: false #print out score values on screen. it can be verbose + +run_ids : + + mrkv586w : # Inference run id. 
+ label: "One-shot LST prediction" + mini_epoch: 0 + rank: 0 + streams: + # ERA5: + # channels: ["2t", "10u"] #, "10v", "z_500", "t_850", "u_850", "v_850", "q_850", ] + # evaluation: + # forecast_step: "all" + # sample: "all" + # ensemble: "all" #supported: "all", "mean", [0,1,2] + # plotting: + # sample: [1, 3] + # forecast_step: [1,3, 2] + # ensemble: [0,2,5] #supported: "all", "mean", [0,1,2] + # plot_maps: true + # plot_target: false + SEVIRI_LST: + channels: ["LST"] #["2t", "q_850", ] #["LST"] # ["LST"] #["2t", "q_850", ] + evaluation: + sample: "all" + forecast_step: "all" + plotting: + sample: [0, 1, 2] + forecast_step: [ 1, 2 ] #, 2, 3, 4] #, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + plot_maps: true + plot_histograms: true \ No newline at end of file diff --git a/config/streams/seviri_lst/seviri_lst.yml b/config/streams/seviri_lst/seviri_lst.yml index c345be779..016feef5e 100644 --- a/config/streams/seviri_lst/seviri_lst.yml +++ b/config/streams/seviri_lst/seviri_lst.yml @@ -10,20 +10,21 @@ SEVIRI_LST : type : msg_lst - filenames : ['lst_test.zarr'] - data_start_time : "2017-05-01 00:00" - data_end_time : "2017-06-30 00:00" + filenames : ['mpg_seviri_l2_2017-18_v0/lst_test.zarr'] # use ['mpg_seviri_l2_2017-18_v0/seviri.zarr'] after zarr3 is enabled + data_start_time : "2017-01-01 00:00" + data_end_time : "2017-09-30 00:00" target: ["LST"] source: ["LST"] geoinfos: ["DEM"] #, "LANDCOV"] - metadata: "/leonardo_work/AIFAC_5C0_154/weathergen/data/mpg_seviri_l2_2017-18_v0/metadata_lst_test" - experiment: "johannesburg" + metadata: "/leonardo_work/AIFAC_5C0_154/weathergen/data/mpg_seviri_l2_2017-18_v1/metadata" # uses one scene over south africa for finetuning loss_weight : 1.0 masking_rate : 0.6 masking_rate_none : 0.05 token_size : 64 tokenize_spacetime : True max_num_targets: -1 #-1 + val_source_channels: ["LST"] + val_target_channels: ["LST"] embed : net : transformer num_tokens : 1 diff --git a/src/weathergen/datasets/data_reader_seviri.py b/src/weathergen/datasets/data_reader_seviri.py index f4eee55ec..ee4bee2cd 100644 --- a/src/weathergen/datasets/data_reader_seviri.py +++ b/src/weathergen/datasets/data_reader_seviri.py @@ -21,7 +21,7 @@ import pdb import os -os.environ['ZARR_V3_EXPERIMENTAL_API'] = '1' +os.environ['ZARR_V3_EXPERIMENTAL_API'] = '1' # doesn't seem to work from weathergen.datasets.data_reader_base import ( DataReaderTimestep, @@ -50,7 +50,7 @@ def __init__( # set sampling parameters self.stride_temporal = 6 # downsample to six hourly timesteps - self.stride_spatial = 8 # use every 8th point to reduce memory usage on workers + self.stride_spatial = 0 # use every 8th point to reduce memory usage on workers index_path = Path(stream_info["metadata"]) / "train_scene_000.npz" self.spatial_indices = np.load(index_path)["seviri_indices"] From 8b36af9ca4431d2f14d9ff88ef5b919c4316c50e Mon Sep 17 00:00:00 2001 From: MWesselkamp Date: Wed, 4 Feb 2026 16:36:32 +0100 Subject: [PATCH 21/24] chore: update author info From d17ed642ec4f622201288531c4761da6d79cbda7 Mon Sep 17 00:00:00 2001 From: MWesselkamp Date: Wed, 4 Feb 2026 18:25:09 +0100 Subject: [PATCH 22/24] chore: linting --- src/weathergen/datasets/data_reader_seviri.py | 111 ++++++++---------- .../datasets/multi_stream_data_sampler.py | 10 +- 2 files changed, 55 insertions(+), 66 deletions(-) diff --git a/src/weathergen/datasets/data_reader_seviri.py b/src/weathergen/datasets/data_reader_seviri.py index ee4bee2cd..ed66257b9 100644 --- a/src/weathergen/datasets/data_reader_seviri.py +++ 
b/src/weathergen/datasets/data_reader_seviri.py @@ -7,21 +7,18 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. +# for interactive debugging import logging +import os from pathlib import Path from typing import override import numpy as np import xarray as xr -from numpy.typing import NDArray import zarr +from numpy.typing import NDArray -# for interactive debugging -import code -import pdb - -import os -os.environ['ZARR_V3_EXPERIMENTAL_API'] = '1' # doesn't seem to work +os.environ["ZARR_V3_EXPERIMENTAL_API"] = "1" # doesn't seem to work from weathergen.datasets.data_reader_base import ( DataReaderTimestep, @@ -33,6 +30,7 @@ _logger = logging.getLogger(__name__) + class DataReaderSeviri(DataReaderTimestep): """Data reader for SEVIRI satellite data.""" @@ -42,17 +40,16 @@ def __init__( filename: Path, stream_info: dict, ) -> None: - """Initialize the SEVIRI data reader.""" self.fillvalue = np.nan np32 = np.float32 # set sampling parameters - self.stride_temporal = 6 # downsample to six hourly timesteps - self.stride_spatial = 0 # use every 8th point to reduce memory usage on workers + self.stride_temporal = 6 # downsample to six hourly timesteps + self.stride_spatial = 0 # use every 8th point to reduce memory usage on workers - index_path = Path(stream_info["metadata"]) / "train_scene_000.npz" + index_path = Path(stream_info["metadata"]) / "train_scene_000.npz" self.spatial_indices = np.load(index_path)["seviri_indices"] self._zarr_path = filename @@ -62,23 +59,19 @@ def __init__( ds_xr = xr.open_zarr(filename, group="seviri") ds_xr["time"] = ds_xr["time"].astype("datetime64[ns]") ds_xr = ds_xr.sel(time=slice(stream_info["data_start_time"], stream_info["data_end_time"])) - print("Selected time period: ", ds_xr.time.min().values, " to ", ds_xr.time.max().values) - - col_extent = ds_xr['longitude'].shape[0] + + col_extent = ds_xr["longitude"].shape[0] lat_idx = self.spatial_indices // col_extent lon_idx = self.spatial_indices % col_extent # Cache spatial indices for zarr access - self._lat_idx = np.array(lat_idx[::self.stride_spatial]) - self._lon_idx = np.array(lon_idx[::self.stride_spatial]) + self._lat_idx = np.array(lat_idx[:: self.stride_spatial]) + self._lon_idx = np.array(lon_idx[:: self.stride_spatial]) - #code.interact(local=locals()) + # code.interact(local=locals()) # Apply spatial subset - ds_xr = ds_xr.isel( - latitude=self._lat_idx, - longitude=self._lon_idx - ) + ds_xr = ds_xr.isel(latitude=self._lat_idx, longitude=self._lon_idx) # Cache time values as numpy (avoid zarr access for time later) self._time_values = np.array(ds_xr.time.values) @@ -103,7 +96,7 @@ def __init__( super().__init__(tw_handler, stream_info) self.init_empty() return - + if "frequency" in stream_info: assert False, "Frequency sub-sampling currently not supported" @@ -131,17 +124,19 @@ def __init__( self.init_empty() return else: - self.len = len(ds_xr['time']) // self.stride_temporal + self.len = len(ds_xr["time"]) // self.stride_temporal self.exclude = {"LWMASK", "LANDCOV", "_indices", "quality_flag"} - self.channels_file = [k for k in ds_xr.keys()] + self.channels_file = [k for k in ds_xr.keys()] self.geoinfo_channels = stream_info.get("geoinfos", []) self.geoinfo_idx = [self.channels_file.index(ch) for ch in self.geoinfo_channels] - + # cache geoinfos if len(self.geoinfo_channels) != 0: - self.geoinfo_data = np.stack([np.array(ds_xr[ch], dtype=np32) for ch in self.geoinfo_channels]) + self.geoinfo_data = np.stack( + 
[np.array(ds_xr[ch], dtype=np32) for ch in self.geoinfo_channels] + ) self._geoinfo_flat = self.geoinfo_data.transpose([1, 2, 0]).reshape( (-1, len(self.geoinfo_channels)) ) @@ -160,7 +155,10 @@ def __init__( } self.mean, self.stdev = self._create_statistics() - self.mean_geoinfo, self.stdev_geoinfo = self.mean[self.geoinfo_idx], self.stdev[self.geoinfo_idx] + self.mean_geoinfo, self.stdev_geoinfo = ( + self.mean[self.geoinfo_idx], + self.stdev[self.geoinfo_idx], + ) # Close xarray, force lazy zarr open in workers ds_xr.close() @@ -168,8 +166,8 @@ def __init__( self._ds = None def _open_ds(self): - store = zarr.open(self._zarr_path, mode='r') - return store['seviri'] + store = zarr.open(self._zarr_path, mode="r") + return store["seviri"] @property def ds(self): @@ -185,8 +183,8 @@ def _create_statistics(self): statistics = Path(self.stream_info["metadata"]) / "statistics_global.npz" df_stats = _assemble_statistics_from_npz(statistics) - #mean_lookup = df_stats.set_index('variable')["mean"] - #std_lookup = df_stats.set_index('variable')["std"] + # mean_lookup = df_stats.set_index('variable')["mean"] + # std_lookup = df_stats.set_index('variable')["std"] mean, stdev = [], [] @@ -197,13 +195,10 @@ def _create_statistics(self): else: mean.append(df_stats[ch]["mean"]) stdev.append(df_stats[ch]["std"]) - + mean = np.array(mean) stdev = np.array(stdev) - print("Mean shape", mean.shape) - print("Means", mean) - return mean, stdev @override @@ -211,7 +206,7 @@ def init_empty(self) -> None: super().init_empty() self._ds = None self.len = 0 - + @override def length(self) -> int: return self.len @@ -228,7 +223,7 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: return ReaderData.empty( num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) ) - + if len(t_idxs) == 0 or len(channels_idx) == 0: return ReaderData.empty( num_data_fields=len(channels_idx), num_geo_fields=len(self.geoinfo_idx) @@ -237,22 +232,20 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: assert t_idxs[0] >= 0, "index must be non-negative" # Convert to actual zarr indices (accounting for time offset and stride) - didx_start = self._time_offset + t_idxs[0] * self.stride_temporal + didx_start = self._time_offset + t_idxs[0] * self.stride_temporal didx_end = self._time_offset + t_idxs[-1] * self.stride_temporal + 1 sel_channels = [self.channels_file[i] for i in channels_idx] - + # Access zarr directly with numpy advanced indexing data_list = [] for ch in sel_channels: # zarr array: shape is (time, lat, lon) - ch_data = self.ds[ch][ - didx_start:didx_end:self.stride_temporal, - self._lat_idx, - : - ][:, :, self._lon_idx] + ch_data = self.ds[ch][didx_start : didx_end : self.stride_temporal, self._lat_idx, :][ + :, :, self._lon_idx + ] data_list.append(ch_data) - + data = np.stack(data_list, axis=-1) # shape: (n_times, n_lats, n_lons, n_channels) n_times = data.shape[0] @@ -274,9 +267,9 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: self.latitudes, self.longitudes, indexing="ij", - ) - lat_flat = lat2d.reshape(-1) - lon_flat = lon2d.reshape(-1) + ) + lat_flat = lat2d.reshape(-1) + lon_flat = lon2d.reshape(-1) # Tile spatial coordinates for each timestep coords = np.tile(np.column_stack((lat_flat, lon_flat)), (n_times, 1)) @@ -285,12 +278,9 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: time_indices = slice( t_idxs[0] * self.stride_temporal, t_idxs[-1] * self.stride_temporal + 1, - self.stride_temporal - ) - datetimes = np.repeat( - 
self._time_values[time_indices], - n_spatial + self.stride_temporal, ) + datetimes = np.repeat(self._time_values[time_indices], n_spatial) rd = ReaderData( coords=coords, @@ -299,7 +289,7 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: datetimes=datetimes, ) check_reader_data(rd, dtr) - + return rd def select_channels(self, ds, ch_type: str) -> NDArray[np.int64]: @@ -307,7 +297,7 @@ def select_channels(self, ds, ch_type: str) -> NDArray[np.int64]: channels = self.stream_info.get(ch_type) assert channels is not None, f"{ch_type} channels need to be specified" - + is_empty = len(channels) == 0 if channels is not None else False if is_empty: stream_name = self.stream_info["name"] @@ -330,7 +320,8 @@ def _clip_lon(lons: NDArray) -> NDArray[np.float32]: """Clip longitudes to the range [-180, 180] and ensure periodicity.""" return ((lons + 180.0) % 360.0 - 180.0).astype(np.float32) -def _assemble_statistics_from_npz(src: str | Path ) -> dict[str, dict[str, float]]: + +def _assemble_statistics_from_npz(src: str | Path) -> dict[str, dict[str, float]]: """ Loads statistics saved with `save_statistics_npz`. Returns: @@ -339,16 +330,16 @@ def _assemble_statistics_from_npz(src: str | Path ) -> dict[str, dict[str, float out: dict[str, dict[str, float]] = {} # If it's path-like, normalize to Path; otherwise assume it's file-like - if isinstance(src, (str, os.PathLike)): + if isinstance(src, (str | Path)): src = Path(src) with np.load(src, allow_pickle=True) as z: - variables = list(z['variables']) - stat_names = [k for k in z.files if k != 'variables'] + variables = list(z["variables"]) + stat_names = [k for k in z.files if k != "variables"] for i, var in enumerate(variables): out[str(var)] = {} for stat in stat_names: out[str(var)][stat] = np.asarray(z[stat][i]).item() - return out \ No newline at end of file + return out diff --git a/src/weathergen/datasets/multi_stream_data_sampler.py b/src/weathergen/datasets/multi_stream_data_sampler.py index 429527fbd..4d619233c 100644 --- a/src/weathergen/datasets/multi_stream_data_sampler.py +++ b/src/weathergen/datasets/multi_stream_data_sampler.py @@ -7,15 +7,13 @@ # granted to it by virtue of its status as an intergovernmental organisation # nor does it submit to any jurisdiction. +# for interactive debugging import logging import pathlib import numpy as np import torch -# for interactive debugging -import code - from weathergen.common.io import IOReaderData from weathergen.datasets.data_reader_anemoi import DataReaderAnemoi from weathergen.datasets.data_reader_base import ( @@ -25,8 +23,8 @@ str_to_datetime64, ) from weathergen.datasets.data_reader_fesom import DataReaderFesom -from weathergen.datasets.data_reader_seviri import DataReaderSeviri from weathergen.datasets.data_reader_obs import DataReaderObs +from weathergen.datasets.data_reader_seviri import DataReaderSeviri from weathergen.datasets.masking import Masker from weathergen.datasets.stream_data import StreamData, spoof from weathergen.datasets.tokenizer_forecast import TokenizerForecast @@ -64,7 +62,7 @@ def collect_datasources(stream_datasets: list, idx: int, type: str) -> IOReaderD rdatas = [] for ds in stream_datasets: - #code.interact(local=locals()) + # code.interact(local=locals()) if type == "source": get_reader_data = ds.get_source normalize_channels = ds.normalize_source_channels @@ -152,7 +150,7 @@ def __init__( datapath = cf.data_path_fesom case "msg_lst": dataset = DataReaderSeviri - datapath = cf.data_path_anemoi # on leonardo. 
+ datapath = cf.data_path_anemoi # on leonardo. case type_name: reader_entry = get_extra_reader(type_name, cf) if reader_entry is not None: From 1300fedf28d22f3afc152b8fffa241e60fe2a345 Mon Sep 17 00:00:00 2001 From: MWesselkamp Date: Thu, 12 Feb 2026 09:52:09 +0100 Subject: [PATCH 23/24] stage data reader before updating branch --- src/weathergen/datasets/data_reader_seviri.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/weathergen/datasets/data_reader_seviri.py b/src/weathergen/datasets/data_reader_seviri.py index ed66257b9..73f8205c1 100644 --- a/src/weathergen/datasets/data_reader_seviri.py +++ b/src/weathergen/datasets/data_reader_seviri.py @@ -46,10 +46,10 @@ def __init__( np32 = np.float32 # set sampling parameters - self.stride_temporal = 6 # downsample to six hourly timesteps - self.stride_spatial = 0 # use every 8th point to reduce memory usage on workers + self.stride_temporal = stream_info["temporal_stride"] # downsample to six hourly timesteps + self.stride_spatial = stream_info["spatial_stride"] # use every 8th point to reduce memory usage on workers - index_path = Path(stream_info["metadata"]) / "train_scene_000.npz" + index_path = Path(stream_info["metadata"]) / stream_info["scene"] self.spatial_indices = np.load(index_path)["seviri_indices"] self._zarr_path = filename @@ -160,6 +160,7 @@ def __init__( self.stdev[self.geoinfo_idx], ) + print(f"geoinfo_channels: {self.geoinfo_channels}, _geoinfo_flat shape: {getattr(self, '_geoinfo_flat', 'NOT SET')}") # Close xarray, force lazy zarr open in workers ds_xr.close() ds_full.close() @@ -183,9 +184,6 @@ def _create_statistics(self): statistics = Path(self.stream_info["metadata"]) / "statistics_global.npz" df_stats = _assemble_statistics_from_npz(statistics) - # mean_lookup = df_stats.set_index('variable')["mean"] - # std_lookup = df_stats.set_index('variable')["std"] - mean, stdev = [], [] for ch in self.channels_file: @@ -216,6 +214,7 @@ def _get(self, idx: TIndex, channels_idx: list[int]) -> ReaderData: """ Get data for window (for either source or target, through public interface) """ + print(f"geoinfo_channels: {self.geoinfo_channels}, _geoinfo_flat shape: {getattr(self, '_geoinfo_flat', 'NOT SET')}") (t_idxs, dtr) = self._get_dataset_idxs(idx) From eaea3cd9916f598e4e5308119e7da1e22f1a7b46 Mon Sep 17 00:00:00 2001 From: MWesselkamp Date: Thu, 12 Feb 2026 09:58:08 +0100 Subject: [PATCH 24/24] stage configs before updating branch --- config/evaluate/eval_config_lst.yml | 18 +++--------------- config/streams/seviri_lst/seviri_lst.yml | 12 +++++++----- 2 files changed, 10 insertions(+), 20 deletions(-) diff --git a/config/evaluate/eval_config_lst.yml b/config/evaluate/eval_config_lst.yml index 4e958f640..bee9ef72f 100644 --- a/config/evaluate/eval_config_lst.yml +++ b/config/evaluate/eval_config_lst.yml @@ -12,30 +12,18 @@ evaluation: run_ids : - mrkv586w : # Inference run id. + ndl2qget : # Inference run id. 
label: "One-shot LST prediction" mini_epoch: 0 rank: 0 streams: - # ERA5: - # channels: ["2t", "10u"] #, "10v", "z_500", "t_850", "u_850", "v_850", "q_850", ] - # evaluation: - # forecast_step: "all" - # sample: "all" - # ensemble: "all" #supported: "all", "mean", [0,1,2] - # plotting: - # sample: [1, 3] - # forecast_step: [1,3, 2] - # ensemble: [0,2,5] #supported: "all", "mean", [0,1,2] - # plot_maps: true - # plot_target: false SEVIRI_LST: channels: ["LST"] #["2t", "q_850", ] #["LST"] # ["LST"] #["2t", "q_850", ] evaluation: sample: "all" forecast_step: "all" plotting: - sample: [0, 1, 2] - forecast_step: [ 1, 2 ] #, 2, 3, 4] #, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] + sample: [0, 1] + forecast_step: [ 1, 2, 3, 4, 5, 6] #, 2, 3, 4] #, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] plot_maps: true plot_histograms: true \ No newline at end of file diff --git a/config/streams/seviri_lst/seviri_lst.yml b/config/streams/seviri_lst/seviri_lst.yml index 016feef5e..71efa6ef7 100644 --- a/config/streams/seviri_lst/seviri_lst.yml +++ b/config/streams/seviri_lst/seviri_lst.yml @@ -11,20 +11,22 @@ SEVIRI_LST : type : msg_lst filenames : ['mpg_seviri_l2_2017-18_v0/lst_test.zarr'] # use ['mpg_seviri_l2_2017-18_v0/seviri.zarr'] after zarr3 is enabled - data_start_time : "2017-01-01 00:00" - data_end_time : "2017-09-30 00:00" + data_start_time : "2017-02-01 00:00" + data_end_time : "2017-06-30 00:00" target: ["LST"] source: ["LST"] - geoinfos: ["DEM"] #, "LANDCOV"] + geoinfos: [] #["DEM"] #, "LANDCOV"] metadata: "/leonardo_work/AIFAC_5C0_154/weathergen/data/mpg_seviri_l2_2017-18_v1/metadata" # uses one scene over south africa for finetuning + scene: "scenes_train_scene_001.npz" + spatial_stride: 24 + temporal_stride: 6 + sampling_rate_target: 0.1 # use 10% of spatial points loss_weight : 1.0 masking_rate : 0.6 masking_rate_none : 0.05 token_size : 64 tokenize_spacetime : True max_num_targets: -1 #-1 - val_source_channels: ["LST"] - val_target_channels: ["LST"] embed : net : transformer num_tokens : 1