From 91247acf4e388c3f03e484547bc87dc4cf2a5812 Mon Sep 17 00:00:00 2001 From: SubhraSameerDash <2303105_cseai@gita.edu.in> Date: Sun, 8 Feb 2026 07:48:58 +0530 Subject: [PATCH 1/3] feat: add file id repr Signed-off-by: SubhraSameerDash <2303105_cseai@gita.edu.in> --- .coderabbit.yaml | 150 ++- .github/CODEOWNERS | 55 + .../01_good_first_issue_candidate.yml | 333 ++++-- .../ISSUE_TEMPLATE/02_good_first_issue.yml | 427 ++++++++ ...ginner_issue.yml => 03_beginner_issue.yml} | 20 +- .../ISSUE_TEMPLATE/04_good_first_issue.yml | 300 ------ ...te_issue.yml => 04_intermediate_issue.yml} | 0 ...vanced_issue.yml => 05_advanced_issue.yml} | 0 .../{02_bug_report.yml => bug.yml} | 0 .../{03_feature_request.yml => feature.yml} | 4 +- .github/scripts/bot-advanced-check.sh | 30 +- .../scripts/bot-beginner-assign-on-comment.js | 122 ++- .github/scripts/bot-gfi-assign-on-comment.js | 39 +- .github/scripts/bot-inactivity-unassign.sh | 63 +- .github/scripts/bot-issue-reminder-no-pr.sh | 59 +- .github/scripts/bot-mentor-assignment.js | 64 +- .github/scripts/bot-merge-conflict.js | 3 +- .../scripts/bot-next-issue-recommendation.js | 215 ++++ .github/scripts/bot-office-hours.sh | 3 +- .../scripts/bot-pr-missing-linked-issue.js | 2 +- .github/scripts/bot-unassign-on-comment.js | 63 +- .github/scripts/bot-verified-commits.js | 320 ++++++ .github/scripts/bot-workflows.js | 344 ++++++ .github/scripts/bot-working-on-comment.js | 88 ++ .github/scripts/linked_issue_enforce.js | 14 + .github/scripts/pr_inactivity_reminder.js | 122 ++- .github/workflows/bot-advanced-check.yml | 54 +- .github/workflows/bot-assignment-check.yml | 2 +- .../bot-beginner-assign-on-comment.yml | 2 +- .../workflows/bot-coderabbit-plan-trigger.yml | 2 +- .github/workflows/bot-community-calls.yml | 4 +- .../workflows/bot-gfi-assign-on-comment.yml | 2 +- .../bot-gfi-candidate-notification.yaml | 2 +- .github/workflows/bot-inactivity-unassign.yml | 2 +- .../workflows/bot-intermediate-assignment.yml | 2 +- 
.../workflows/bot-issue-reminder-no-pr.yml | 2 +- .../workflows/bot-linked-issue-enforcer.yml | 2 +- .github/workflows/bot-merge-conflict.yml | 2 +- .../bot-next-issue-recommendation.yml | 36 + .github/workflows/bot-office-hours.yml | 2 +- .../workflows/bot-p0-issues-notify-team.yml | 2 +- .../workflows/bot-pr-inactivity-reminder.yml | 2 +- .../workflows/bot-pr-missing-linked-issue.yml | 2 +- .github/workflows/bot-verified-commits.yml | 137 ++- .github/workflows/bot-workflows.yml | 78 +- .github/workflows/cron-check-broken-links.yml | 2 +- .github/workflows/cron-update-spam-list.yml | 4 +- .github/workflows/pr-check-broken-links.yml | 2 +- .github/workflows/pr-check-changelog.yml | 3 +- .github/workflows/pr-check-codecov.yml | 4 +- .github/workflows/pr-check-examples.yml | 4 +- .github/workflows/pr-check-test.yml | 6 +- .github/workflows/pr-check-title.yml | 2 +- .github/workflows/publish.yml | 4 +- .github/workflows/unassign-on-comment.yml | 2 +- .github/workflows/working-on-comment.yml | 41 + CHANGELOG.md | 88 +- CONTRIBUTING.md | 10 +- MAINTAINERS.md | 8 +- README.md | 1 + docs/maintainers/team.md | 11 + .../next-issue-recommendation-bot.md | 156 +++ docs/sdk_developers/merge_conflicts.md | 47 +- docs/sdk_developers/setup.md | 2 +- .../training/network_and_client.md | 2 +- .../protoBuf-training/ProtoBuf-Training.md | 235 +++++ .../protoBuf-training/ProtoBuf_Example.md | 89 ++ .../training/setup/setup_windows.md | 95 ++ .../account_id_populate_from_mirror.py | 144 +++ .../consensus/topic_create_transaction.py | 70 +- examples/protobuf_round_trip.py | 100 ++ examples/tokens/token_freeze_transaction.py | 69 +- .../transaction_freeze_manually.py | 119 +++ .../transaction_freeze_secondary_client.py | 149 +++ .../transaction_freeze_without_operator.py | 135 +++ src/hiero_sdk_python/__init__.py | 4 + src/hiero_sdk_python/account/account_id.py | 279 ++++- src/hiero_sdk_python/client/client.py | 233 +++- src/hiero_sdk_python/client/network.py | 279 +++-- 
src/hiero_sdk_python/consensus/topic_id.py | 9 + .../topic_message_submit_transaction.py | 20 +- .../contract/contract_call_query.py | 7 +- src/hiero_sdk_python/contract/contract_id.py | 174 ++- .../contract/contract_info_query.py | 7 +- src/hiero_sdk_python/executable.py | 467 ++++++-- .../file/file_append_transaction.py | 9 +- src/hiero_sdk_python/file/file_id.py | 9 + src/hiero_sdk_python/node.py | 134 ++- .../query/account_balance_query.py | 8 +- .../query/account_info_query.py | 7 +- .../query/token_info_query.py | 7 +- .../query/token_nft_info_query.py | 7 +- .../query/topic_info_query.py | 7 +- .../query/transaction_get_receipt_query.py | 5 +- .../query/transaction_record_query.py | 6 +- src/hiero_sdk_python/staking_info.py | 139 +++ .../tokens/assessed_custom_fee.py | 98 ++ .../transaction/transaction.py | 68 +- src/hiero_sdk_python/utils/crypto_utils.py | 17 +- .../utils/entity_id_helper.py | 68 +- .../account_id_population_e2e_test.py | 108 ++ .../contract_id_population_e2e_test.py | 69 ++ .../transaction_freeze_e2e_test.py | 148 +++ tests/integration/utils.py | 170 ++- ...ount_allowance_approve_transaction_test.py | 62 +- ...count_allowance_delete_transaction_test.py | 57 +- tests/unit/account_balance_query_test.py | 11 +- tests/unit/account_create_transaction_test.py | 209 ++-- tests/unit/account_id_test.py | 577 +++++++++- tests/unit/account_info_query_test.py | 95 +- tests/unit/account_info_test.py | 60 +- tests/unit/account_records_query_test.py | 4 +- tests/unit/account_update_transaction_test.py | 22 +- tests/unit/assessed_custom_fee_test.py | 199 ++++ tests/unit/batch_transaction_test.py | 168 +-- tests/unit/client_test.py | 385 ++++++- tests/unit/conftest.py | 23 +- tests/unit/contract_call_query_test.py | 3 +- .../unit/contract_create_transaction_test.py | 1 + .../unit/contract_execute_transaction_test.py | 1 + tests/unit/contract_id_test.py | 381 +++++-- tests/unit/contract_info_test.py | 1 + .../unit/contract_update_transaction_test.py | 1 + 
tests/unit/custom_fee_test.py | 88 +- tests/unit/entity_id_helper_test.py | 90 ++ tests/unit/ethereum_transaction_test.py | 2 + tests/unit/evm_address_test.py | 7 + tests/unit/executable_test.py | 997 ++++++++++++++++-- tests/unit/file_append_transaction_test.py | 70 +- tests/unit/file_create_transaction_test.py | 110 +- tests/unit/file_delete_transaction_test.py | 1 + tests/unit/file_id_test.py | 116 +- tests/unit/file_info_query_test.py | 93 +- tests/unit/file_update_transaction_test.py | 3 +- tests/unit/get_receipt_query_test.py | 33 +- tests/unit/hbar_allowance_test.py | 4 +- tests/unit/hbar_test.py | 82 +- tests/unit/hbar_transfer_test.py | 8 +- tests/unit/hedera_trust_manager_test.py | 18 +- tests/unit/key_utils_test.py | 25 +- tests/unit/keys_private_test.py | 26 +- tests/unit/keys_public_test.py | 54 +- tests/unit/logger_test.py | 25 +- tests/unit/mock_server.py | 106 +- tests/unit/network_test.py | 449 ++++++++ tests/unit/network_tls_test.py | 102 +- tests/unit/nft_id_test.py | 35 +- tests/unit/node_address_test.py | 74 +- tests/unit/node_create_transaction_test.py | 6 +- tests/unit/node_test.py | 87 ++ tests/unit/node_tls_test.py | 184 ++-- tests/unit/node_update_transaction_test.py | 24 +- tests/unit/prng_transaction_test.py | 4 +- tests/unit/query_nodes_test.py | 3 + tests/unit/query_test.py | 192 ++-- tests/unit/staking_info_test.py | 302 ++++++ tests/unit/token_create_transaction_test.py | 4 +- tests/unit/topic_id_test.py | 5 + .../topic_message_submit_transaction_test.py | 24 +- .../unit/transaction_freeze_and_bytes_test.py | 94 ++ 160 files changed, 10780 insertions(+), 2371 deletions(-) create mode 100644 .github/CODEOWNERS create mode 100644 .github/ISSUE_TEMPLATE/02_good_first_issue.yml rename .github/ISSUE_TEMPLATE/{05_beginner_issue.yml => 03_beginner_issue.yml} (90%) delete mode 100644 .github/ISSUE_TEMPLATE/04_good_first_issue.yml rename .github/ISSUE_TEMPLATE/{06_intermediate_issue.yml => 04_intermediate_issue.yml} (100%) rename 
.github/ISSUE_TEMPLATE/{07_advanced_issue.yml => 05_advanced_issue.yml} (100%) rename .github/ISSUE_TEMPLATE/{02_bug_report.yml => bug.yml} (100%) rename .github/ISSUE_TEMPLATE/{03_feature_request.yml => feature.yml} (97%) create mode 100644 .github/scripts/bot-next-issue-recommendation.js create mode 100644 .github/scripts/bot-verified-commits.js create mode 100644 .github/scripts/bot-workflows.js create mode 100644 .github/scripts/bot-working-on-comment.js create mode 100644 .github/workflows/bot-next-issue-recommendation.yml create mode 100644 .github/workflows/working-on-comment.yml create mode 100644 docs/sdk_developers/automations/next-issue-recommendation-bot.md create mode 100644 docs/sdk_developers/training/protoBuf-training/ProtoBuf-Training.md create mode 100644 docs/sdk_developers/training/protoBuf-training/ProtoBuf_Example.md create mode 100644 docs/sdk_developers/training/setup/setup_windows.md create mode 100644 examples/account/account_id_populate_from_mirror.py create mode 100644 examples/protobuf_round_trip.py create mode 100644 examples/transaction/transaction_freeze_manually.py create mode 100644 examples/transaction/transaction_freeze_secondary_client.py create mode 100644 examples/transaction/transaction_freeze_without_operator.py create mode 100644 src/hiero_sdk_python/staking_info.py create mode 100644 src/hiero_sdk_python/tokens/assessed_custom_fee.py create mode 100644 tests/integration/account_id_population_e2e_test.py create mode 100644 tests/integration/contract_id_population_e2e_test.py create mode 100644 tests/integration/transaction_freeze_e2e_test.py create mode 100644 tests/unit/assessed_custom_fee_test.py create mode 100644 tests/unit/network_test.py create mode 100644 tests/unit/node_test.py create mode 100644 tests/unit/staking_info_test.py diff --git a/.coderabbit.yaml b/.coderabbit.yaml index f5cd8a50d..7f3afb755 100644 --- a/.coderabbit.yaml +++ b/.coderabbit.yaml @@ -160,38 +160,38 @@ reviews: - Deterministic and user-safe # 
QUERY REVIEW INSTRUCTIONS - query_review_instructions: &query_review_instructions | + query_review_instructions: &query_review_instructions | You are acting as a senior maintainer reviewing the Query base class and its subclasses for the hiero-sdk-python project. - + NOTE: - Review focus levels indicate areas that are important to check carefully. - They do NOT imply severity or urgency. - Only recommend fixes when there is a clear behavioral regression. - + Scope is STRICTLY LIMITED to: - Changes to the base `Query` class - Changes to existing `Query` subclasses - Newly added `Query` subclasses - + ---------------------------------------------------------- REVIEW FOCUS 1 — QUERY SEMANTICS & PAYMENT BEHAVIOR (CONTRACTUAL / HIGH SENSITIVITY) ---------------------------------------------------------- Queries do not reach consensus and use `QueryHeader` for payment and responseType. - + The following behaviors are contractual and must remain unchanged: - `_is_payment_required()` semantics - FREE vs PAID query classification - COST_ANSWER vs ANSWER_ONLY behavior - Whether a payment transaction is attached - + Good to check and verify that changes do NOT: - Alter FREE → PAID or PAID → FREE behavior - Attach payment to COST_ANSWER queries - Bypass `get_cost(client)` for paid queries - Hardcode fees or override payment logic - + ---------------------------------------------------------- REVIEW FOCUS 2 — EXECUTION LIFECYCLE & BASE CLASS INTEGRITY ---------------------------------------------------------- @@ -199,15 +199,15 @@ reviews: - Use the base `Query` execution flow - Delegate retries, backoff, and node selection to `_Executable` - Call `_before_execute(client)` before `_execute(client)` - + Subclasses MUST NOT: - Override retry logic - Implement custom node selection - Manage gRPC deadlines manually - Bypass `_Executable` state handling - + Flag deviations for review; recommend fixes only if behavior changes. 
- + ---------------------------------------------------------- REVIEW FOCUS 3 — REQUEST CONSTRUCTION CONTRACT ---------------------------------------------------------- @@ -221,7 +221,7 @@ reviews: - Set `responseType` directly - Inject payment logic - Rebuild headers manually - + ---------------------------------------------------------- REVIEW FOCUS 4 — RESPONSE EXTRACTION & DOMAIN MAPPING ---------------------------------------------------------- @@ -229,12 +229,12 @@ reviews: - Return the exact protobuf response field - Perform NO data transformation - Match the expected protobuf response type - + `execute()` MUST NOT: - Implement retries or error handling - Modify payment or execution behavior - Catch and suppress execution errors - + ---------------------------------------------------------- REVIEW FOCUS 5 — NEW SUBCLASS VALIDATION ---------------------------------------------------------- @@ -244,9 +244,9 @@ reviews: - Confirm payment semantics match the Hedera API - Validate protobuf service and method correctness - Ensure naming matches existing query patterns - + Missing or incorrect semantics should be flagged clearly. - + ---------------------------------------------------------- REVIEW FOCUS 6 — REGRESSION & BEHAVIOR CHANGE DETECTION ---------------------------------------------------------- @@ -257,10 +257,10 @@ reviews: - Alters `_get_method()` behavior - Introduces side effects (logging, prints, stack traces) - Changes error propagation behavior - + Small changes should be flagged for verification if they could affect execution flow or payment safety. 
- + ---------------------------------------------------------- REVIEW FOCUS 7 — EXPLICIT NON-GOALS ---------------------------------------------------------- @@ -268,7 +268,7 @@ reviews: - Review query consumers - Propose refactors unless correctness is impacted - Comment on style, formatting, or naming unless misleading - + ---------------------------------------------------------- FINAL OBJECTIVE ---------------------------------------------------------- @@ -278,7 +278,6 @@ reviews: - Execution-consistent - Strictly aligned with Hedera query semantics - # ============================================================ # GLOBAL REVIEW INSTRUCTIONS (APPLY TO ALL FILES) # ============================================================ @@ -293,6 +292,116 @@ reviews: - Do NOT suggest fixes inline. - Instead, aggregate all out-of-scope issues into a single comment with a list of recommendations for one or more follow-up issues that can be created. path_instructions: + # --- CODEOWNERS REVIEW INSTRUCTIONS --- + - path: ".github/CODEOWNERS" + instructions: | + You are acting as a senior maintainer reviewing the CODEOWNERS file + for the hiero-sdk-python repository. This file controls review + enforcement and repository governance. + + Your role is to verify correctness, coverage, and organizational + alignment — not formatting preferences. + + ---------------------------------------------------------- + REVIEW FOCUS 1 — TEAM SLUG CORRECTNESS (CRITICAL) + ---------------------------------------------------------- + All GitHub team references MUST use the exact, valid organization + team slugs that represent teams with write or higher permissions. 
+ + Flag as critical: + - Any non-existent team slugs + - Use of individual usernames + - Inclusion of teams that do NOT have write permissions + (eg @hiero-ledger/hiero-sdk-python-triage) + + Expected teams commonly include: + - @hiero-ledger/hiero-sdk-python-maintainers + - @hiero-ledger/hiero-sdk-python-committers + - @hiero-ledger/github-maintainers + - @hiero-ledger/tsc + + The following team MUST NOT appear because they do not have write access: + - @hiero-ledger/hiero-sdk-python-triage + ---------------------------------------------------------- + REVIEW FOCUS 2 — DIRECTORY VS FILE MATCHING (HIGH IMPORTANCE) + ---------------------------------------------------------- + Directories that are intended to cover their contents MUST use /** + Flag directory cases like: + - /.github/workflows incorrect, use /.github/workflows/** + + Specific files MUST remain specific to them and NOT use /** + - /.github/CODEOWNERS/** incorrect, use /.github/CODEOWNERS + + Ensure rules actually match real repo assets. + + ---------------------------------------------------------- + REVIEW FOCUS 3 — PRECEDENCE & ORDERING (HIGH IMPORTANCE) + ---------------------------------------------------------- + CODEOWNERS uses last-match-wins precedence. + + Flag if rule ordering breaks CODEOWNERS precedence (last match wins): + - A broad rule (e.g. *) appears after more specific rules + - A broader path (e.g. /.github/**) appears after a more specific subpath rule + (e.g. /.github/workflows/**) + - A sensitive path (e.g. /.github/workflows/**) is followed by a broader rule that would override it + + ---------------------------------------------------------- + REVIEW FOCUS 4 — REPOSITORY GOVERNANCE PROTECTION (HIGH IMPORTANCE) + ---------------------------------------------------------- + These paths must NOT be owned only by committers. 
+ + Ensure these include @hiero-ledger/github-maintainers: + - /.github/workflows/** + - /.github/scripts/** + - /.github/CODEOWNERS + - /CODEOWNERS (enables root-level protection) + + Ensure packaging governance includes: + - /pyproject.toml → @hiero-ledger/hiero-sdk-python-maintainers, @hiero-ledger/github-maintainers, @hiero-ledger/tsc + + + Flag missing governance coverage. + + ---------------------------------------------------------- + REVIEW FOCUS 5 — SCOPE APPROPRIATENESS (MEDIUM IMPORTANCE) + ---------------------------------------------------------- + Committers may own source, tests, and documentation, but + governance and infrastructure files should be maintainer-level + or higher. + + Flag if: + - TSC or GitHub maintainers are missing from critical governance files + + ---------------------------------------------------------- + REVIEW FOCUS 6 — REDUNDANCY & DEAD RULES (MEDIUM IMPORTANCE) + ---------------------------------------------------------- + Identify rules that will never apply because a later rule overrides + them, or because the path does not exist. 
+ + Examples to flag: + - Duplicate patterns that are shadowed by broader ones, that do not improve clarity + - Rules referencing directories or files not present in the repo + + EXCEPTION: + The following paths are intentional future-protection rules and MUST + NOT be flagged as dead even if the file does not currently exist: + - /CODEOWNERS + ---------------------------------------------------------- + EXPLICIT NON-GOALS (CRITICAL IMPORTANCE) + ---------------------------------------------------------- + Do NOT: + - Suggest stylistic formatting changes + - Suggest reordering rules UNLESS precedence is incorrect + - Suggest adding reviewers or changing org governance policy + + ---------------------------------------------------------- + FINAL OBJECTIVE + ---------------------------------------------------------- + Ensure the CODEOWNERS file: + - Uses valid GitHub team slugs + - Correctly matches repository paths + - Enforces intended review governance + - Has no precedence bugs that weaken protection - path: "src/hiero_sdk_python/tokens/**/*.py" instructions: *token_review_instructions # --- CUSTOM INSTRUCTIONS FOR EXAMPLES DIRECTORY --- @@ -686,10 +795,9 @@ reviews: - path: "src/hiero_sdk_python/query/**/*.py" instructions: *query_review_instructions - + - path: "src/hiero_sdk_python/contract/**/*_query.py" instructions: *query_review_instructions - chat: art: false # Don't draw ASCII art (false) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..01a10d7bd --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,55 @@ +################################### +##### Global Protection Rule ###### +################################### +# Catch-all rule for all files not covered by more specific rules. 
+* @hiero-ledger/hiero-sdk-python-maintainers + + +############################ +##### Project Files ###### +############################ + +# Python source +/src/** @hiero-ledger/hiero-sdk-python-committers @hiero-ledger/hiero-sdk-python-maintainers + +# Tests +/tests/** @hiero-ledger/hiero-sdk-python-committers @hiero-ledger/hiero-sdk-python-maintainers + +# Examples & docs +/examples/** @hiero-ledger/hiero-sdk-python-committers @hiero-ledger/hiero-sdk-python-maintainers +/docs/** @hiero-ledger/hiero-sdk-python-committers @hiero-ledger/hiero-sdk-python-maintainers + +# Workflow +/CONTRIBUTING.md @hiero-ledger/hiero-sdk-python-committers @hiero-ledger/hiero-sdk-python-maintainers + +######################### +##### Core Files ###### +######################### +# Must be placed last to ensure enforcement over all other rules + +# GitHub configuration and workflows +/.github/** @hiero-ledger/hiero-sdk-python-maintainers @hiero-ledger/github-maintainers +/.github/ISSUE_TEMPLATE/** @hiero-ledger/hiero-sdk-python-maintainers @hiero-ledger/github-maintainers +/.github/scripts/** @hiero-ledger/hiero-sdk-python-maintainers @hiero-ledger/github-maintainers +/.github/workflows/** @hiero-ledger/hiero-sdk-python-maintainers @hiero-ledger/github-maintainers + +# Python packaging & build system +/pyproject.toml @hiero-ledger/hiero-sdk-python-maintainers @hiero-ledger/github-maintainers @hiero-ledger/tsc + +# Tooling +/mypy.ini @hiero-ledger/hiero-sdk-python-maintainers +/pytest.ini @hiero-ledger/hiero-sdk-python-maintainers +/codecov.yml @hiero-ledger/hiero-sdk-python-maintainers + +# Self-protection +/MAINTAINERS.md @hiero-ledger/hiero-sdk-python-maintainers +/.github/CODEOWNERS @hiero-ledger/hiero-sdk-python-maintainers @hiero-ledger/github-maintainers +/CODEOWNERS @hiero-ledger/github-maintainers + +# Documentation & legal +/README.md @hiero-ledger/hiero-sdk-python-maintainers +/LICENSE @hiero-ledger/hiero-sdk-python-maintainers @hiero-ledger/github-maintainers 
@hiero-ledger/tsc + +# Git ignore definitions +**/.gitignore @hiero-ledger/hiero-sdk-python-maintainers @hiero-ledger/github-maintainers +**/.gitignore.* @hiero-ledger/hiero-sdk-python-maintainers @hiero-ledger/github-maintainers diff --git a/.github/ISSUE_TEMPLATE/01_good_first_issue_candidate.yml b/.github/ISSUE_TEMPLATE/01_good_first_issue_candidate.yml index bd1e62b27..dfb1ccf5a 100644 --- a/.github/ISSUE_TEMPLATE/01_good_first_issue_candidate.yml +++ b/.github/ISSUE_TEMPLATE/01_good_first_issue_candidate.yml @@ -4,6 +4,16 @@ title: "[Good First Issue]:" labels: ["Good First Issue Candidate"] assignees: [] body: + - type: markdown + attributes: + value: | + --- + ## **Thanks for creating a good first issue candidate!** 😊 + + We truly appreciate your time and effort, welcome! + This template is designed to help you create a Good First Issue Candidate (GFI) : a small, well-scoped task that may be missing some documentation or there is uncertainty if it is a good fit for a GFI. + --- + - type: textarea id: intro-gfi-candidate attributes: @@ -13,10 +23,9 @@ body: > It is being evaluated for suitability and may require > clarification or refinement before it is ready to be picked up. > - > Please wait for maintainer confirmation before starting work. - > - > Maintainers and reviewers can read more about Good First Issues: - > docs/maintainers/good_first_issue_guidelines.md + > **Please wait for maintainer confirmation before asking to be assigned.** + > + > Maintainers and reviewers can read more about [Good First Issues](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/maintainers/good_first_issues_guidelines.md) and [Good First Issue Candidates](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/maintainers/good_first_issue_candidate_guidelines.md) validations: required: false @@ -24,66 +33,69 @@ body: - type: textarea id: intro attributes: - label: 🆕🐥 First Timers Only + label: 🆕🐥 Newcomer Friendly description: Who is this issue for? 
value: | - This issue is reserved for people who have never contributed or have made minimal contributions to [Hiero Python SDK](https://hiero.org). - We know that creating a pull request (PR) is a major barrier for new contributors. - The goal of this issue and all other issues in [**find a good first issue**](https://github.com/issues?q=is%3Aopen+is%3Aissue+org%3Ahiero-ledger+archived%3Afalse+label%3A%22good+first+issue%22+) is to help you make your first contribution to the Hiero Python SDK. - validations: - required: false + This **[Good First Issue](https://github.com/hiero-ledger/hiero-sdk-python/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22Good%20First%20Issue%22%20no%3Aassignee)** is a guided, well-scoped task intended for new contributors to the Hiero Python SDK. + + #### What you’ll do + - ✅ understand how the repository is structured + - ✅ practice the standard contribution workflow + - ✅ submit and merge a pull request + + #### Support + A maintainer or mentor actively monitors this issue and will help **guide it to completion**. + + > [!IMPORTANT] + > **This issue does not require prior domain knowledge.** + > + > - No Hiero or Hedera experience needed + > - No distributed ledger background required + > - **Basic Python and Git are sufficient** + + > [!NOTE] + > ⏱️ **Typical time to complete:** 30–90 minutes (once setup is done) + > 🧩 **Difficulty:** Small, well-contained change + > 🎓 **Best for:** New contributors + + **🏁 Completion** + When this issue is complete, you will have: + + - ✅ Solved a real issue + - ✅ A merged pull request in the Hiero Python SDK + - ✅ Your name in the project history + - ✅ Confidence to take on larger issues next - type: markdown attributes: value: | > [!IMPORTANT] - > ### 📋 Good First Issue (GFI) Guidelines - > A Good First Issue generally has a narrow scope and clear instructions, with examples or solution provided. 
- > - > Examples of acceptable GFI work include: - > - > - **Very small, explicitly defined changes to `src` functionality** - > *(rare and mechanical only — not behavior-changing)*, such as: - > - Adding or fixing `__str__` or `__repr__` methods **when the exact output is specified** - > - Fully specified typing fixes (e.g. adding a known return type) - > - **Refactors of existing examples** that are purely structural: - > - Splitting an existing example into functions - > - Combining a split example into a single function - > *(when explicitly instructed)* - > - **Documentation improvements** that are instruction-driven: - > - Fixing known typos or grammar issues - > - Renaming variables when new names are provided - > - Making explicitly requested changes to docstrings, comments, or print statements - > - **Functional improvements to examples** - > *(only when the additional steps are explicitly described)* - > - **Small, clearly specified edits to existing tests** - > - Adding a known assertion to an existing test file - > - > **What we do NOT consider Good First Issues:** + > #### 📋 Good First Issue (GFI) Guidelines + > A **Good First Issue [Guidelines](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/maintainers/good_first_issue_guidelines.md)** is a small, well-scoped task that helps new contributors get familiar with the codebase and contribution workflow. 
> - > - Writing new documentation, examples, or tests - > - Any change that requires deciding *what* to do or *how* something should behave - > - Changes to SDK behavior, public APIs, or contracts - > - Changes to DLT or protocol logic, including `to_proto` / `from_proto` - > - Work spanning multiple files, modules, or subsystems - > - Tasks requiring domain, protocol, or architectural knowledge + > **Often a good fit for Good First Issues:** + > - Spelling or grammar fixes + > - Small documentation formatting improvements + > - Purely structural refactors of examples + > - Minor, clearly described edits to docstrings or comments + > - Light file moves or renames for clarity + > - Applying formatting tools (e.g. `black`, `ruff`) in limited areas > + > More involved tasks may be a better fit for **Beginner**, **Intermediate**, or **Advanced** issues. > - > 📖 *For a more detailed explanation, refer to: - > [`docs/maintainers/good_first_issue_candidate_guidelines.md`](docs/maintainers/good_first_issue_candidate_guidelines.md).* + > If you’re unsure, the **Good First Issue Candidate Template** can be used. - type: textarea id: issue attributes: - label: 👾 Description of the issue + label: 👾 Issue description description: | - DESCRIBE THE ISSUE IN A WAY THAT IS UNDERSTANDABLE TO NEW CONTRIBUTORS. - YOU MUST NOT ASSUME THAT SUCH CONTRIBUTORS HAVE ANY KNOWLEDGE ABOUT THE LANGUAGE, CODEBASE OR HIERO. - IT IS HELPFUL TO ADD LINKS TO THE RELEVANT DOCUMENTATION AND/OR CODE SECTIONS. - BELOW IS AN EXAMPLE. + Describe the issue in a way that’s easy for new contributors to understand. + Briefly explain why this change is useful or needed, even if the impact seems small. + Please avoid assuming prior knowledge of the language, codebase, or Hiero, as Good First Issues are often surfaced to new developers. + Links to relevant documentation or code are very welcome. value: | Edit here. Example provided below. 
- validations: required: true @@ -162,13 +174,10 @@ body: attributes: label: 💡 Proposed Solution description: | - AT THIS SECTION YOU NEED TO DESCRIBE THE STEPS NEEDED TO SOLVE THE ISSUE. - PLEASE BREAK DOWN THE STEPS AS MUCH AS POSSIBLE AND MAKE SURE THAT THEY - ARE EASY TO FOLLOW. IF POSSIBLE, ADD LINKS TO THE RELEVANT - DOCUMENTATION AND/OR CODE SECTIONS. + Describe what a good solution would look like. + Keep this high-level and easy to understand. Implementation details can go in the subsequent Implementation Steps section. value: | Edit here. Example provided below. - validations: required: true @@ -188,14 +197,19 @@ body: - type: textarea id: implementation attributes: - label: 👩‍💻 Implementation Steps + label: 🛠️ Implementation Steps description: | - AT THIS SECTION YOU NEED TO DESCRIBE THE TECHNICAL STEPS NEEDED TO SOLVE THE ISSUE. - PLEASE BREAK DOWN THE STEPS AS MUCH AS POSSIBLE AND MAKE SURE THAT THEY ARE EASY TO FOLLOW. - IF POSSIBLE, ADD LINKS TO THE RELEVANT DOCUMENTATION AND/OR CODE. + To make this issue easy to pick up and complete, please include: + - Which files need to be changed or added + - Any functions, classes, or modules involved + - The complete steps to implement the solution + - What the final result or output should look like + - Links to relevant documentation or code (if helpful) + For good first issues, please keep this as guided and clear as possible. value: | Edit here. Example provided below. 
+ --- validations: required: true @@ -203,7 +217,7 @@ body: attributes: value: | - ### 👩‍💻 Implementation - Example + #### 👩‍💻 Implementation - Example To break down the monolithic main function, you need to: - [ ] Extract the Key Steps (set up a client, create a test account, create a token, associate the token) @@ -236,73 +250,194 @@ body: ``` - type: textarea - id: acceptance-criteria + id: setup_steps attributes: - label: ✅ Acceptance Criteria - description: | - EDIT OR EXPAND THE CHECKLIST ON WHAT IS REQUIRED TO BE ABLE TO MERGE A PULL REQUEST FOR THIS ISSUE + label: 📋 Step-by-Step Setup Guide + description: Provide a step-by-step setup guide for new contributors value: | - To be able to merge a pull request for this issue, we need: - - - [ ] **Assignment:** get assigned by commenting `/assign` [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/04_assigning_issues.md) - - [ ] **Changelog Entry:** Correct changelog entry [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) - - [ ] **Signed commits:** commits must be DCO and GPG key signed as `git commit -S -s -m "chore: my change"` with a GPG key set up [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) - - [ ] **All Tests Pass:** our workflow checks like unit and integration tests must pass - - [ ] **Issue is Solved:** The implementation fully addresses the issue requirements as described above - - [ ] **No Further Changes are Made:** Code review feedback has been addressed and no further changes are requested + #### Suggestions: + - [ ] Visual Studio (VS) Code: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/01_supporting_infrastructure.md) + + - [ ] GitHub Desktop: 
[Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/01_supporting_infrastructure.md) + + - [ ] Hedera Testnet Account with root .env file: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/setup/03_setting_up_env.md) + + - [ ] Create a GPG key linked to GitHub: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) + + #### Setup the Hiero Python SDK for development + - [ ] **Fork** Create an online and local copy of the repository: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/02_forking_python_sdk.md) + + - [ ] **Connect** origin with upstream: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/03_staying_in_sync.md) + + - [ ] **Install Packages** and protobufs: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/setup/02_installing_hiero_python_sdk.md) (or [Windows Setup Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/setup/setup_windows.md) for Windows users) + + - [ ] **Sync Main** pull any recent upstream changes: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/rebasing.md) + + You are set up! 🎉 validations: required: true - type: textarea id: contribution_steps attributes: - label: 📋 Step-by-Step Contribution Guide - description: Provide a contribution workflow suitable for new contributors + label: 📋 Step-by-step contribution guide + description: Provide a contribution workflow suitable for new contributors. value: | - If you have never contributed to an open source project at GitHub, the following step-by-step guide will introduce you to the workflow. 
+ #### ✅ Get ready + - [ ] **Claim the issue:** comment `/assign`: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/04_assigning_issues.md) + + - [ ] **Double check the Issue and AI plan:** carefully re-read the issue description and the CodeRabbit AI plan + + - [ ] **Ask questions early:** ask on [Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md), your `@mentor` (Python SDK help) and the `@good_first_issue_support_team` (setup and workflow help) + + - [ ] **Sync with main:** pull the latest upstream changes [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/rebasing.md) + + - [ ] 💡 Tip: Before coding, leave a short comment describing what you plan to change. We’ll confirm you’re on the right track. + + #### 🛠️ Solve the Issue + - [ ] **Create a branch from `main`:** [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/05_working_branches.md) + + - [ ] **Implement the solution**: follow the implementation steps in the issue description. 
+ + - [ ] **Commit with DCO and GPG signing:** commit changes using: `git commit -S -s -m "chore: your message"`, [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) + + - [ ] **Add a `.CHANGELOG.md` entry:** under the appropriate **[UNRELEASED]** section and commit as `git commit -S -s -m "chore: changelog entry"` [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) + + #### 🚀 Create the pull request + - [ ] **Push your commits:** push your branch to your fork `git push origin your-branch-name` + + - [ ] **Open a pull request:** [here](https://github.com/hiero-ledger/hiero-sdk-python/pulls) [guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/11_submit_pull_request.md) - - [ ] **Assignment:** get assigned by commenting `/assign` [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/04_assigning_issues.md) - - [ ] **Fork, Branch and Work on the issue:** Create a copy of the repository, create a branch for the issue and solve the problem. For instructions, please read our [Contributing guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/CONTRIBUTING.md) file. Further help can be found at [Set-up Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/setup) and [Workflow Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/workflow). - - [ ] **DCO and GPG key sign each commit :** each commit must be -s and -S signed. An explanation on how to do this is at [Signing Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) - - [ ] **Add a Changelog Entry :** your pull request will require a changelog. 
Read [Changelog Entry Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) to learn how. - - [ ] **Push and Create a Pull Request :** Once your issue is resolved, and your commits are signed, and you have a changelog entry, push your changes and create a pull request. Detailed instructions can be found at [Submit PR Training](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/11_submit_pull_request.md), part of [Workflow Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/workflow). - - [ ] **You did it 🎉:** A maintainer or committer will review your pull request and provide feedback. If approved, we will merge the fix in the main branch. Thanks for being part of the Hiero community as an open-source contributor ❤️ + - [ ] **Complete the PR description:** briefly describe your changes, [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/11_submit_pull_request.md) + + - [ ] **Link the Issue:** link the issue the PR solves in the PR description, [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/how_to_link_issues.md) + + - [ ] **Submit the pull request:** click `**Create pull request**` 🎉 - ***IMPORTANT*** You will ONLY be assigned to the issue if you comment: `/assign` - ***IMPORTANT*** Your pull request CANNOT BE MERGED until you add a changelog entry AND sign your commits each with `git commit -S -s -m "chore: your commit message"` with a GPG key setup. validations: required: true - type: textarea - id: ai_usage_guidelines + id: acceptance-criteria attributes: - label: 🤖 AI Usage Guidelines - description: Guidance on using AI tools responsibly for this issue + label: ✅ Acceptance criteria + description: | + Edit or expand this checklist with what is required to merge a pull request for this issue. 
value: | - You are welcome to use AI to help you understand and solve this issue. + To be able to close this issue, the following criteria must be met: - Because AI tools can sometimes make mistakes, please take care to: + - [ ] **The issue is solved:** I’ve carefully read and implemented the issue requirements - - Only implement what is described in this issue - - Avoid changing anything else in the file - - Be careful when modifying parameters or return statements, as this may affect runtime behavior + - [ ] **I did not add extra changes:** I did not modify anything beyond what is described in the issue - If you're unsure, ask your mentor or the maintainers for help — they can provide expert Python SDK guidance and point you to the right examples or methods. + - [ ] **Behavior:** All other existing features continue to work as before + + - [ ] **Checks and feedback:** All checks pass and any requested changes have been made validations: - required: false + required: true - type: textarea - id: information + id: getting_help attributes: - label: 🤔 Additional Information - description: Provide any extra resources or context for contributors to solve this good first issue + label: 🧭 Getting help if you’re stuck + description: How to get support while working on this issue. + value: | + If questions come up, don’t spend more than **20 minutes** blocked. + + > [!TIP] + > + > - Comment on this issue and tag `@good_first_issue_support_team` or `@mentor_name` + > - Ask for help in [Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md) + > + + --- + + - type: textarea + id: pr_expectations + attributes: + label: 🤔 What to expect after submitting a PR + description: Explain what happens after a pull request is opened. + value: | + Once you open a pull request, here’s what happens next. + + **🤖 1. Automated checks** + A small set of automated checks must pass before merging (signing, changelog, tests, examples, code quality). 
+ Open any failed check to see details. + + --- + + **🤝 2. AI feedback (CodeRabbit)** + CodeRabbit AI may suggest improvements or flag issues. + Feedback is advisory — use what’s relevant and helpful. + + --- + + **😎 3. Team review** + A Python SDK team member reviews your PR within **1–3 days**. + You may be asked to make changes or your PR may be approved. + Approved PRs are usually merged within **one day**. + + + **🔄 Merge conflicts (sometimes)** + Conflicts can happen and are normal as the SDK updates. + Changelog conflicts can be resolved online in the PR in the merge editor, accepting both entries + Others may require **[rebasing](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/rebasing.md)**. + + --- + + validations: + required: true + + - type: textarea + id: ai_usage_guidelines + attributes: + label: 🤖 AI usage guidelines + description: Guidance on using AI tools responsibly for this issue. value: | - For more help, we have extensive documentation attributes: - - [SDK Developer Docs](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers) - - [SDK Developer Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training) + You’re welcome to use AI tools while working on this issue. + + Many contributors do — especially for: + - understanding unfamiliar code + - drafting small refactors + - sanity-checking approaches - Additionally, we invite you to join our community on our [Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md) server. + **Use AI responsibly:** + - review suggestions carefully + - apply changes incrementally + - test as you go - We also invite you to attend each Wednesday, 2pm UTC our [Python SDK Office Hour and Community Calls](https://zoom-lfx.platform.linuxfoundation.org/meetings/hiero?view=week). The Python SDK Office hour is for hands-on-help and the Community Call for general community discussion. 
+ If in doubt, ask — maintainers are happy to help. - You can also ask for help in a comment below! + - type: textarea + id: information + attributes: + label: 🤔 Additional Help + description: Provide any extra resources or context for contributors to solve this good first issue + value: | + #### First Points of Contact: + - [Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md) + - Comment with `@mentor_name` (for Python SDK questions) + - Comment with `@hiero-ledger/hiero-sdk-good-first-issue-support` (for setup and workflow questions) + The more you ask, the more you learn and so do we! + + #### Documentation: + - [README.md](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/README.md) + - [CONTRIBUTING.md](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/CONTRIBUTING.md) + - [Project Structure](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/setup/project_structure.md) + - [DCO and Verified Signing Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) + - [Changelog Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) + - [Rebasing Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/rebasing.md) + - [Merge Conflicts Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/merge_conflicts.md) + - [Linking Issues Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/how_to_link_issues.md) + - [Workflow Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/workflow.md) + + - [Pin Github Actions Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/how-to-pin-github-actions.md) + - [Running Examples](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/examples.md) + - [Testing on 
Forks](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/testing_forks.md) + + - [General Training](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training) + - [General SDK Developer Docs](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers) + + #### Calls: + - Get hands-on-help by our expert team at our [Office Hours](https://zoom-lfx.platform.linuxfoundation.org/meeting/99912667426?password=5b584a0e-1ed7-49d3-b2fc-dc5ddc888338) + - Learn, raise issues and provide feedback at [Community Calls](https://zoom-lfx.platform.linuxfoundation.org/meeting/92041330205?password=2f345bee-0c14-4dd5-9883-06fbc9c60581) diff --git a/.github/ISSUE_TEMPLATE/02_good_first_issue.yml b/.github/ISSUE_TEMPLATE/02_good_first_issue.yml new file mode 100644 index 000000000..cd73807ec --- /dev/null +++ b/.github/ISSUE_TEMPLATE/02_good_first_issue.yml @@ -0,0 +1,427 @@ +name: Good First Issue Template +description: Create a Good First Issue for new contributors +title: "[Good First Issue]: " +labels: ["Good First Issue"] +assignees: [] +body: + - type: markdown + attributes: + value: | + --- + ## **Thanks for creating a good first issue!** 😊 + + We truly appreciate your time and effort, welcome! + This template is designed to help you create a Good First Issue (GFI) : a small, well-scoped task that helps new contributors learn the codebase and workflow. + --- + + - type: textarea + id: intro + attributes: + label: 🆕🐥 Newcomer Friendly + description: Who is this issue for? + value: | + This **[Good First Issue](https://github.com/hiero-ledger/hiero-sdk-python/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22Good%20First%20Issue%22%20no%3Aassignee)** is a guided, well-scoped task intended for new contributors to the Hiero Python SDK. 
+ + #### What you’ll do + - ✅ understand how the repository is structured + - ✅ practice the standard contribution workflow + - ✅ submit and merge a pull request + + #### Support + A maintainer or mentor actively monitors this issue and will help **guide it to completion**. + + > [!IMPORTANT] + > **This issue does not require prior domain knowledge.** + > + > - No Hiero or Hedera experience needed + > - No distributed ledger background required + > - **Basic Python and Git are sufficient** + + > [!NOTE] + > ⏱️ **Typical time to complete:** 30–90 minutes (once setup is done) + > 🧩 **Difficulty:** Small, well-contained change + > 🎓 **Best for:** New contributors + + **🏁 Completion** + When this issue is complete, you will have: + + - ✅ Solved a real issue + - ✅ A merged pull request in the Hiero Python SDK + - ✅ Your name in the project history + - ✅ Confidence to take on larger issues next + + - type: markdown + attributes: + value: | + > [!IMPORTANT] + > #### 📋 Good First Issue (GFI) Guidelines + > A **Good First Issue [Guidelines](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/maintainers/good_first_issue_guidelines.md)** is a small, well-scoped task that helps new contributors get familiar with the codebase and contribution workflow. + > + > **Often a good fit for Good First Issues:** + > - Spelling or grammar fixes + > - Small documentation formatting improvements + > - Purely structural refactors of examples + > - Minor, clearly described edits to docstrings or comments + > - Light file moves or renames for clarity + > - Applying formatting tools (e.g. `black`, `ruff`) in limited areas + > + > More involved tasks may be a better fit for **Beginner**, **Intermediate**, or **Advanced** issues. + > + > If you’re unsure, the **Good First Issue Candidate Template** can be used. + + - type: textarea + id: issue + attributes: + label: 👾 Issue description + description: | + Describe the issue in a way that’s easy for new contributors to understand. 
+ Briefly explain why this change is useful or needed, even if the impact seems small. + Please avoid assuming prior knowledge of the language, codebase, or Hiero, as Good First Issues are often surfaced to new developers. + Links to relevant documentation or code are very welcome. + value: | + Edit here. Example provided below. + validations: + required: true + + - type: markdown + attributes: + value: | + + ## 👾 Description of the issue - Example + + The example for Token Associate Transaction located at examples/tokens/token_associate_transaction.py can be improved. It correctly illustrates how to associate a token, however, it does so all from one function main() + + As everything is grouped together in main(), it is difficult for a user to understand all the individual steps required to associate a token. + + For example: + ```python + + def run_demo(): + """Monolithic token association demo.""" + print(f"🚀 Connecting to Hedera {network_name} network!") + client = Client(Network(network_name)) + operator_id = AccountId.from_string(os.getenv("OPERATOR_ID", "")) + operator_key = PrivateKey.from_string(os.getenv("OPERATOR_KEY", "")) + client.set_operator(operator_id, operator_key) + print(f"✅ Client ready (operator {operator_id})") + + test_key = PrivateKey.generate_ed25519() + receipt = ( + AccountCreateTransaction() + .set_key(test_key.public_key()) + .set_initial_balance(Hbar(1)) + .set_account_memo("Test account for token association demo") + .freeze_with(client) + .sign(operator_key) + .execute(client) + ) + if receipt.status != ResponseCode.SUCCESS: + raise Exception(receipt.status) + account_id = receipt.account_id + print(f"✅ Created test account {account_id}") + + # Create tokens + tokens = [] + for i in range(3): + try: + receipt = ( + TokenCreateTransaction() + .set_token_name(f"DemoToken{i}") + .set_token_symbol(f"DTK{i}") + .set_decimals(2) + .set_initial_supply(100_000) + .set_treasury_account_id(operator_id) + .freeze_with(client) + 
.sign(operator_key) + .execute(client) + ) + if receipt.status != ResponseCode.SUCCESS: + raise Exception(receipt.status) + token_id = receipt.token_id + tokens.append(token_id) + print(f"✅ Created token {token_id}") + except Exception as e: + print(f"❌ Token creation failed: {e}") + sys.exit(1) + + # Associate first token + try: + TokenAssociateTransaction().set_account_id(account_id).add_token_id(tokens[0]).freeze_with(client).sign(test_key).execute(client) + print(f"✅ Token {tokens[0]} associated with account {account_id}") + except Exception as e: + print(f"❌ Token association failed: {e}") + sys.exit(1) + ``` + + - type: textarea + id: solution + attributes: + label: 💡 Proposed Solution + description: | + Describe what a good solution would look like. + Keep this high-level and easy to understand. Implementation details can go in the subsequent Implementation Steps section. + value: | + Edit here. Example provided below. + validations: + required: true + + - type: markdown + attributes: + value: | + + ## 💡 Solution - Example + + For the TokenAssociateTransaction example, the solution is to split the monolithic main() function for illustrating TokenAssociateTransaction into separate smaller functions which are called from main(). + Such as: + - Setting up the client + - Creating an account + - Creating a token + - Associating the account to the token + + - type: textarea + id: implementation + attributes: + label: 🛠️ Implementation Steps + description: | + To make this issue easy to pick up and complete, please include: + - Which files need to be changed or added + - Any functions, classes, or modules involved + - The complete steps to implement the solution + - What the final result or output should look like + - Links to relevant documentation or code (if helpful) + For good first issues, please keep this as guided and clear as possible. + value: | + Edit here. Example provided below. 
+ + --- + validations: + required: true + + - type: markdown + attributes: + value: | + + #### 👩‍💻 Implementation - Example + + To break down the monolithic main function, you need to: + - [ ] Extract the Key Steps (set up a client, create a test account, create a token, associate the token) + - [ ] Copy and paste the functionality for each key step into its own function + - [ ] Pass to each function the variables you need to run it + - [ ] Call each function in main() + - [ ] Ensure you return the values you'll need to pass on to the next step in main + - [ ] Ensure the example still runs and has the same output! + + For example: + ```python + + def setup_client(): + """Initialize and set up the client with operator account.""" + + def create_test_account(client, operator_key): + """Create a new test account for demonstration.""" + + def create_fungible_token(client, operator_id, operator_key): + """Create a fungible token for association with test account.""" + + def associate_token_with_account(client, token_id, account_id, account_key): + """Associate the token with the test account.""" + + def main(): + client, operator_id, operator_key = setup_client() + account_id, account_private_key = create_test_account(client, operator_key) + token_id = create_fungible_token(client, operator_id, operator_key) + associate_token_with_account(client, token_id, account_id, account_private_key) + ``` + + - type: textarea + id: setup_steps + attributes: + label: 📋 Step-by-Step Setup Guide + description: Provide a step-by-step setup guide for new contributors + value: | + #### Suggestions: + - [ ] Visual Studio (VS) Code: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/01_supporting_infrastructure.md) + + - [ ] GitHub Desktop: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/01_supporting_infrastructure.md) + + - [ ] Hedera Testnet Account with root .env file: 
[Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/setup/03_setting_up_env.md) + + - [ ] Create a GPG key linked to GitHub: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) + + #### Setup the Hiero Python SDK for development + - [ ] **Fork** Create an online and local copy of the repository: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/02_forking_python_sdk.md) + + - [ ] **Connect** origin with upstream: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/03_staying_in_sync.md) + + - [ ] **Install Packages** and protobufs: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/setup/02_installing_hiero_python_sdk.md) (or [Windows Setup Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/setup/setup_windows.md) for Windows users) + + - [ ] **Sync Main** pull any recent upstream changes: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/rebasing.md) + + You are set up! 🎉 + validations: + required: true + + - type: textarea + id: contribution_steps + attributes: + label: 📋 Step-by-step contribution guide + description: Provide a contribution workflow suitable for new contributors. 
+ value: | + #### ✅ Get ready + - [ ] **Claim the issue:** comment `/assign`: [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/04_assigning_issues.md) + + - [ ] **Double check the Issue and AI plan:** carefully re-read the issue description and the CodeRabbit AI plan + + - [ ] **Ask questions early:** ask on [Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md), your `@mentor` (Python SDK help) and the `@good_first_issue_support_team` (setup and workflow help) + + - [ ] **Sync with main:** pull the latest upstream changes [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/rebasing.md) + + - [ ] 💡 Tip: Before coding, leave a short comment describing what you plan to change. We’ll confirm you’re on the right track. + + #### 🛠️ Solve the Issue + - [ ] **Create a branch from `main`:** [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/05_working_branches.md) + + - [ ] **Implement the solution**: follow the implementation steps in the issue description. 
+ + - [ ] **Commit with DCO and GPG signing:** commit changes using: `git commit -S -s -m "chore: your message"`, [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) + + - [ ] **Add a `.CHANGELOG.md` entry:** under the appropriate **[UNRELEASED]** section and commit as `git commit -S -s -m "chore: changelog entry"` [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) + + #### 🚀 Create the pull request + - [ ] **Push your commits:** push your branch to your fork `git push origin your-branch-name` + + - [ ] **Open a pull request:** [here](https://github.com/hiero-ledger/hiero-sdk-python/pulls) [guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/11_submit_pull_request.md) + + - [ ] **Complete the PR description:** briefly describe your changes, [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/11_submit_pull_request.md) + + - [ ] **Link the Issue:** link the issue the PR solves in the PR description, [Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/how_to_link_issues.md) + + - [ ] **Submit the pull request:** click `**Create pull request**` 🎉 + + validations: + required: true + + - type: textarea + id: acceptance-criteria + attributes: + label: ✅ Acceptance criteria + description: | + Edit or expand this checklist with what is required to merge a pull request for this issue. 
+ value: | + To be able to close this issue, the following criteria must be met: + + - [ ] **The issue is solved:** I’ve carefully read and implemented the issue requirements + + - [ ] **I did not add extra changes:** I did not modify anything beyond what is described in the issue + + - [ ] **Behavior:** All other existing features continue to work as before + + - [ ] **Checks and feedback:** All checks pass and any requested changes have been made + validations: + required: true + + - type: textarea + id: getting_help + attributes: + label: 🧭 Getting help if you’re stuck + description: How to get support while working on this issue. + value: | + If questions come up, don’t spend more than **20 minutes** blocked. + + > [!TIP] + > + > - Comment on this issue and tag `@good_first_issue_support_team` or `@mentor_name` + > - Ask for help in [Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md) + > + + --- + + - type: textarea + id: pr_expectations + attributes: + label: 🤔 What to expect after submitting a PR + description: Explain what happens after a pull request is opened. + value: | + Once you open a pull request, here’s what happens next. + + **🤖 1. Automated checks** + A small set of automated checks must pass before merging (signing, changelog, tests, examples, code quality). + Open any failed check to see details. + + --- + + **🤝 2. AI feedback (CodeRabbit)** + CodeRabbit AI may suggest improvements or flag issues. + Feedback is advisory — use what’s relevant and helpful. + + --- + + **😎 3. Team review** + A Python SDK team member reviews your PR within **1–3 days**. + You may be asked to make changes or your PR may be approved. + Approved PRs are usually merged within **one day**. + + + **🔄 Merge conflicts (sometimes)** + Conflicts can happen and are normal as the SDK updates. 
+ Changelog conflicts can be resolved online in the PR in the merge editor, accepting both entries + Others may require **[rebasing](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/rebasing.md)**. + + --- + + validations: + required: true + + - type: textarea + id: ai_usage_guidelines + attributes: + label: 🤖 AI usage guidelines + description: Guidance on using AI tools responsibly for this issue. + value: | + You’re welcome to use AI tools while working on this issue. + + Many contributors do — especially for: + - understanding unfamiliar code + - drafting small refactors + - sanity-checking approaches + + **Use AI responsibly:** + - review suggestions carefully + - apply changes incrementally + - test as you go + + If in doubt, ask — maintainers are happy to help. + + - type: textarea + id: information + attributes: + label: 🤔 Additional Help + description: Provide any extra resources or context for contributors to solve this good first issue + value: | + #### First Points of Contact: + - [Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md) + - Comment with `@mentor_name` (for Python SDK questions) + - Comment with `@hiero-ledger/hiero-sdk-good-first-issue-support` (for setup and workflow questions) + The more you ask, the more you learn and so do we! 
+ + #### Documentation: + - [README.md](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/README.md) + - [CONTRIBUTING.md](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/CONTRIBUTING.md) + - [Project Structure](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/setup/project_structure.md) + - [DCO and Verified Signing Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) + - [Changelog Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) + - [Rebasing Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/rebasing.md) + - [Merge Conflicts Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/merge_conflicts.md) + - [Linking Issues Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/how_to_link_issues.md) + - [Workflow Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/workflow.md) + + - [Pin Github Actions Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/how-to-pin-github-actions.md) + - [Running Examples](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/examples.md) + - [Testing on Forks](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/testing_forks.md) + + - [General Training](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training) + - [General SDK Developer Docs](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers) + + #### Calls: + - Get hands-on-help by our expert team at our [Office Hours](https://zoom-lfx.platform.linuxfoundation.org/meeting/99912667426?password=5b584a0e-1ed7-49d3-b2fc-dc5ddc888338) + - Learn, raise issues and provide feedback at [Community 
Calls](https://zoom-lfx.platform.linuxfoundation.org/meeting/92041330205?password=2f345bee-0c14-4dd5-9883-06fbc9c60581) diff --git a/.github/ISSUE_TEMPLATE/05_beginner_issue.yml b/.github/ISSUE_TEMPLATE/03_beginner_issue.yml similarity index 90% rename from .github/ISSUE_TEMPLATE/05_beginner_issue.yml rename to .github/ISSUE_TEMPLATE/03_beginner_issue.yml index 373a67770..49ef1f5ce 100644 --- a/.github/ISSUE_TEMPLATE/05_beginner_issue.yml +++ b/.github/ISSUE_TEMPLATE/03_beginner_issue.yml @@ -155,13 +155,17 @@ body: description: | EDIT OR EXPAND THE CHECKLIST ON WHAT IS REQUIRED TO BE ABLE TO MERGE A PULL REQUEST FOR THIS ISSUE value: | - To be able to merge a pull request for this issue, we need: - - [ ] **Assignment:** You must be assigned to the issue, comment: `/assign` in the issue to get assigned [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/04_assigning_issues.md) - - [ ] **Changelog Entry:** Correct changelog entry [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) - - [ ] **Signed commits:** commits must be DCO and GPG key signed as `git commit -S -s -m "chore: my change"` with a GPG key set up [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) - - [ ] **All Tests Pass:** our workflow checks like unit and integration tests must pass - - [ ] **Issue is Solved:** The implementation fully addresses the issue requirements as described above - - [ ] **No Further Changes are Made:** Code review feedback has been addressed and no further changes are requested + - [ ] **The issue is solved:** + My changes do exactly what the issue asked for. + + - [ ] **I did not add extra changes:** + I did not modify anything that was not mentioned in the issue description. + + - [ ] **Nothing else was broken:** + All existing features still work the same as before. 
+ + - [ ] **All checks pass:** + The automated tests (unit and integration tests) run successfully. validations: required: true @@ -172,7 +176,7 @@ body: description: Provide a contribution workflow suitable for new contributors value: | - [ ] **Assignment:** You must be assigned to the issue, comment: `/assign` in the issue to get assigned [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/04_assigning_issues.md) - - [ ] **Fork, Branch and Work on the issue:** Create a copy of the repository, create a branch for the issue and solve the problem. For instructions, please read our [Contributing guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/CONTRIBUTING.md) file. Further help can be found at [Set-up Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/setup) and [Workflow Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/workflow). + - [ ] **Fork, Branch and Work on the issue:** Create a copy of the repository, create a branch for the issue and solve the problem. For instructions, please read our [Contributing guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/CONTRIBUTING.md) file. Further help can be found at [Set-up Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/setup) (including the [Windows Setup Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/setup/setup_windows.md) for Windows users) and [Workflow Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/workflow). - [ ] **DCO and GPG key sign each commit :** each commit must be -s and -S signed. 
An explanation on how to do this is at [Signing Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) - [ ] **Add a Changelog Entry :** your pull request will require a changelog. Read [Changelog Entry Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) to learn how. - [ ] **Push and Create a Pull Request :** Once your issue is resolved, and your commits are signed, and you have a changelog entry, push your changes and create a pull request. Detailed instructions can be found at [Submit PR Training](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/11_submit_pull_request.md), part of [Workflow Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/workflow). diff --git a/.github/ISSUE_TEMPLATE/04_good_first_issue.yml b/.github/ISSUE_TEMPLATE/04_good_first_issue.yml deleted file mode 100644 index a0dcf4096..000000000 --- a/.github/ISSUE_TEMPLATE/04_good_first_issue.yml +++ /dev/null @@ -1,300 +0,0 @@ -name: Good First Issue Template -description: Create a Good First Issue for new contributors -title: "[Good First Issue]: " -labels: ["Good First Issue"] -assignees: [] -body: - - type: markdown - attributes: - value: | - --- - ## **Thanks for contributing!** 😊 - - We truly appreciate your time and effort. If this is your first open-source contribution, welcome! - This template is designed to help you create a Good First Issue (GFI) : a small, well-scoped task that helps new contributors learn the codebase and workflow. - --- - - type: textarea - id: intro - attributes: - label: 🆕🐥 First Timers Only - description: Who is this issue for? - value: | - This issue is reserved for people who have never contributed or have made minimal contributions to [Hiero Python SDK](https://hiero.org). - We know that creating a pull request (PR) is a major barrier for new contributors. 
- The goal of this issue and all other issues in [**find a good first issue**](https://github.com/issues?q=is%3Aopen+is%3Aissue+org%3Ahiero-ledger+archived%3Afalse+label%3A%22good+first+issue%22+) is to help you make your first contribution to the Hiero Python SDK. - validations: - required: false - - - type: markdown - attributes: - value: | - > [!IMPORTANT] - > ### 📋 Good First Issue (GFI) Guidelines - > A Good First Issue generally has a narrow scope and clear instructions, with examples or solution provided. - > - > Examples of acceptable GFI work include: - > - > - **Very small, explicitly defined changes to `src` functionality** - > *(rare and mechanical only — not behavior-changing)*, such as: - > - Adding or fixing `__str__` or `__repr__` methods **when the exact output is specified** - > - Fully specified typing fixes (e.g. adding a known return type) - > - **Refactors of existing examples** that are purely structural: - > - Splitting an existing example into functions - > - Combining a split example into a single function - > *(when explicitly instructed)* - > - **Documentation improvements** that are instruction-driven: - > - Fixing known typos or grammar issues - > - Renaming variables when new names are provided - > - Making explicitly requested changes to docstrings, comments, or print statements - > - **Functional improvements to examples** - > *(only when the additional steps are explicitly described)* - > - **Small, clearly specified edits to existing tests** - > - Adding a known assertion to an existing test file - > - > **What we do NOT consider Good First Issues:** - > - > - Writing new documentation, examples, or tests - > - Any change that requires deciding *what* to do or *how* something should behave - > - Changes to SDK behavior, public APIs, or contracts - > - Changes to DLT or protocol logic, including `to_proto` / `from_proto` - > - Work spanning multiple files, modules, or subsystems - > - Tasks requiring domain, protocol, or 
architectural knowledge - > - > - > 📖 *For a more detailed explanation, refer to: - > [`docs/maintainers/good_first_issue_candidate_guidelines.md`](docs/maintainers/good_first_issue_candidate_guidelines.md).* - - - type: textarea - id: issue - attributes: - label: 👾 Description of the issue - description: | - DESCRIBE THE ISSUE IN A WAY THAT IS UNDERSTANDABLE TO NEW CONTRIBUTORS. - YOU MUST NOT ASSUME THAT SUCH CONTRIBUTORS HAVE ANY KNOWLEDGE ABOUT THE LANGUAGE, CODEBASE OR HIERO. - IT IS HELPFUL TO ADD LINKS TO THE RELEVANT DOCUMENTATION AND/OR CODE SECTIONS. - BELOW IS AN EXAMPLE. - value: | - Edit here. Example provided below. - - validations: - required: true - - - type: markdown - attributes: - value: | - - ## 👾 Description of the issue - Example - - The example for Token Associate Transaction located at examples/tokens/token_associate_transaction.py can be improved. It correctly illustrates how to associate a token, however, it does so all from one function main() - - As everything is grouped together in main(), it is difficult for a user to understand all the individual steps required to associate a token. 
- - For example: - ```python - - def run_demo(): - """Monolithic token association demo.""" - print(f"🚀 Connecting to Hedera {network_name} network!") - client = Client(Network(network_name)) - operator_id = AccountId.from_string(os.getenv("OPERATOR_ID", "")) - operator_key = PrivateKey.from_string(os.getenv("OPERATOR_KEY", "")) - client.set_operator(operator_id, operator_key) - print(f"✅ Client ready (operator {operator_id})") - - test_key = PrivateKey.generate_ed25519() - receipt = ( - AccountCreateTransaction() - .set_key(test_key.public_key()) - .set_initial_balance(Hbar(1)) - .set_account_memo("Test account for token association demo") - .freeze_with(client) - .sign(operator_key) - .execute(client) - ) - if receipt.status != ResponseCode.SUCCESS: - raise Exception(receipt.status) - account_id = receipt.account_id - print(f"✅ Created test account {account_id}") - - # Create tokens - tokens = [] - for i in range(3): - try: - receipt = ( - TokenCreateTransaction() - .set_token_name(f"DemoToken{i}") - .set_token_symbol(f"DTK{i}") - .set_decimals(2) - .set_initial_supply(100_000) - .set_treasury_account_id(operator_id) - .freeze_with(client) - .sign(operator_key) - .execute(client) - ) - if receipt.status != ResponseCode.SUCCESS: - raise Exception(receipt.status) - token_id = receipt.token_id - tokens.append(token_id) - print(f"✅ Created token {token_id}") - except Exception as e: - print(f"❌ Token creation failed: {e}") - sys.exit(1) - - # Associate first token - try: - TokenAssociateTransaction().set_account_id(account_id).add_token_id(tokens[0]).freeze_with(client).sign(test_key).execute(client) - print(f"✅ Token {tokens[0]} associated with account {account_id}") - except Exception as e: - print(f"❌ Token association failed: {e}") - sys.exit(1) - ``` - - - type: textarea - id: solution - attributes: - label: 💡 Proposed Solution - description: | - AT THIS SECTION YOU NEED TO DESCRIBE THE STEPS NEEDED TO SOLVE THE ISSUE. 
- PLEASE BREAK DOWN THE STEPS AS MUCH AS POSSIBLE AND MAKE SURE THAT THEY - ARE EASY TO FOLLOW. IF POSSIBLE, ADD LINKS TO THE RELEVANT - DOCUMENTATION AND/OR CODE SECTIONS. - value: | - Edit here. Example provided below. - - validations: - required: true - - - type: markdown - attributes: - value: | - - ## 💡 Solution - Example - - For the TokenAssociateTransaction example, the solution is to split the monolithic main() function for illustrating TokenAssociateTransaction into separate smaller functions which are called from main(). - Such as: - - Setting up the client - - Creating an account - - Creating a token - - Associating the account to the token - - - type: textarea - id: implementation - attributes: - label: 👩‍💻 Implementation Steps - description: | - AT THIS SECTION YOU NEED TO DESCRIBE THE TECHNICAL STEPS NEEDED TO SOLVE THE ISSUE. - PLEASE BREAK DOWN THE STEPS AS MUCH AS POSSIBLE AND MAKE SURE THAT THEY ARE EASY TO FOLLOW. - IF POSSIBLE, ADD LINKS TO THE RELEVANT DOCUMENTATION AND/OR CODE. - value: | - Edit here. Example provided below. - - validations: - required: true - - - type: markdown - attributes: - value: | - - ### 👩‍💻 Implementation - Example - - To break down the monolithic main function, you need to: - - [ ] Extract the Key Steps (set up a client, create a test account, create a token, associate the token) - - [ ] Copy and paste the functionality for each key step into its own function - - [ ] Pass to each function the variables you need to run it - - [ ] Call each function in main() - - [ ] Ensure you return the values you'll need to pass on to the next step in main - - [ ] Ensure the example still runs and has the same output! 
- - For example: - ```python - - def setup_client(): - """Initialize and set up the client with operator account.""" - - def create_test_account(client, operator_key): - """Create a new test account for demonstration.""" - - def create_fungible_token(client, operator_id, operator_key): - """Create a fungible token for association with test account.""" - - def associate_token_with_account(client, token_id, account_id, account_key): - """Associate the token with the test account.""" - - def main(): - client, operator_id, operator_key = setup_client() - account_id, account_private_key = create_test_account(client, operator_key) - token_id = create_fungible_token(client, operator_id, operator_key) - associate_token_with_account(client, token_id, account_id, account_private_key) - ``` - - - type: textarea - id: acceptance-criteria - attributes: - label: ✅ Acceptance Criteria - description: | - EDIT OR EXPAND THE CHECKLIST ON WHAT IS REQUIRED TO BE ABLE TO MERGE A PULL REQUEST FOR THIS ISSUE - value: | - To be able to merge a pull request for this issue, we need: - - - [ ] **Assignment:** get assigned by commenting `/assign` [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/04_assigning_issues.md) - - [ ] **Changelog Entry:** Correct changelog entry [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) - - [ ] **Signed commits:** commits must be DCO and GPG key signed as `git commit -S -s -m "chore: my change"` with a GPG key set up [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) - - [ ] **All Tests Pass:** our workflow checks like unit and integration tests must pass - - [ ] **Issue is Solved:** The implementation fully addresses the issue requirements as described above - - [ ] **No Further Changes are Made:** Code review feedback has been addressed and no further changes are requested - validations: 
- required: true - - - type: textarea - id: contribution_steps - attributes: - label: 📋 Step-by-Step Contribution Guide - description: Provide a contribution workflow suitable for new contributors - value: | - If you have never contributed to an open source project at GitHub, the following step-by-step guide will introduce you to the workflow. - - - [ ] **Assignment:** get assigned by commenting `/assign` [see guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/04_assigning_issues.md) - - [ ] **Fork, Branch and Work on the issue:** Create a copy of the repository, create a branch for the issue and solve the problem. For instructions, please read our [Contributing guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/CONTRIBUTING.md) file. Further help can be found at [Set-up Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/setup) and [Workflow Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/workflow). - - [ ] **DCO and GPG key sign each commit :** each commit must be -s and -S signed. An explanation on how to do this is at [Signing Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md) - - [ ] **Add a Changelog Entry :** your pull request will require a changelog. Read [Changelog Entry Guide](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md) to learn how. - - [ ] **Push and Create a Pull Request :** Once your issue is resolved, and your commits are signed, and you have a changelog entry, push your changes and create a pull request. 
Detailed instructions can be found at [Submit PR Training](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/training/workflow/11_submit_pull_request.md), part of [Workflow Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training/workflow). - - [ ] **You did it 🎉:** A maintainer or committer will review your pull request and provide feedback. If approved, we will merge the fix in the main branch. Thanks for being part of the Hiero community as an open-source contributor ❤️ - - ***IMPORTANT*** You will ONLY be assigned to the issue if you comment: `/assign` - ***IMPORTANT*** Your pull request CANNOT BE MERGED until you add a changelog entry AND sign your commits each with `git commit -S -s -m "chore: your commit message"` with a GPG key setup. - validations: - required: true - - - type: textarea - id: ai_usage_guidelines - attributes: - label: 🤖 AI Usage Guidelines - description: Guidance on using AI tools responsibly for this issue - value: | - You are welcome to use AI to help you understand and solve this issue. - - Because AI tools can sometimes make mistakes, please take care to: - - - Only implement what is described in this issue - - Avoid changing anything else in the file - - Be careful when modifying parameters or return statements, as this may affect runtime behavior - - If you're unsure, ask your mentor or the maintainers for help — they can provide expert Python SDK guidance and point you to the right examples or methods. 
- validations: - required: false - - - type: textarea - id: information - attributes: - label: 🤔 Additional Information - description: Provide any extra resources or context for contributors to solve this good first issue - value: | - For more help, we have extensive documentation attributes: - - [SDK Developer Docs](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers) - - [SDK Developer Training](https://github.com/hiero-ledger/hiero-sdk-python/tree/main/docs/sdk_developers/training) - - Additionally, we invite you to join our community on our [Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md) server. - - We also invite you to attend each Wednesday, 2pm UTC our [Python SDK Office Hour and Community Calls](https://zoom-lfx.platform.linuxfoundation.org/meetings/hiero?view=week). The Python SDK Office hour is for hands-on-help and the Community Call for general community discussion. - - You can also ask for help in a comment below! 
diff --git a/.github/ISSUE_TEMPLATE/06_intermediate_issue.yml b/.github/ISSUE_TEMPLATE/04_intermediate_issue.yml similarity index 100% rename from .github/ISSUE_TEMPLATE/06_intermediate_issue.yml rename to .github/ISSUE_TEMPLATE/04_intermediate_issue.yml diff --git a/.github/ISSUE_TEMPLATE/07_advanced_issue.yml b/.github/ISSUE_TEMPLATE/05_advanced_issue.yml similarity index 100% rename from .github/ISSUE_TEMPLATE/07_advanced_issue.yml rename to .github/ISSUE_TEMPLATE/05_advanced_issue.yml diff --git a/.github/ISSUE_TEMPLATE/02_bug_report.yml b/.github/ISSUE_TEMPLATE/bug.yml similarity index 100% rename from .github/ISSUE_TEMPLATE/02_bug_report.yml rename to .github/ISSUE_TEMPLATE/bug.yml diff --git a/.github/ISSUE_TEMPLATE/03_feature_request.yml b/.github/ISSUE_TEMPLATE/feature.yml similarity index 97% rename from .github/ISSUE_TEMPLATE/03_feature_request.yml rename to .github/ISSUE_TEMPLATE/feature.yml index c86ac25b0..04a0310a7 100644 --- a/.github/ISSUE_TEMPLATE/03_feature_request.yml +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -1,6 +1,6 @@ name: Feature description: Suggest an idea for this project -labels: [ enhancement ] +labels: [enhancement] type: Feature body: - type: markdown @@ -35,4 +35,4 @@ body: id: alternatives attributes: label: Alternatives - description: What alternative solutions have you considered? \ No newline at end of file + description: What alternative solutions have you considered? diff --git a/.github/scripts/bot-advanced-check.sh b/.github/scripts/bot-advanced-check.sh index babea07dd..22b18beb0 100644 --- a/.github/scripts/bot-advanced-check.sh +++ b/.github/scripts/bot-advanced-check.sh @@ -89,26 +89,20 @@ get_intermediate_count() { already_commented() { local user=$1 local marker="$COMMENT_MARKER_PREFIX @$user" - gh issue view "$ISSUE_NUMBER" --repo "$REPO" \ --json comments \ - --jq --arg marker "$marker" ' - .comments[].body - | select(contains($marker)) - ' | grep -q . 
+ --jq '.comments[].body' | grep -Fq "$marker" } ####################################### # Helper: is user currently assigned? ####################################### + is_assigned() { local user=$1 - gh issue view "$ISSUE_NUMBER" --repo "$REPO" \ --json assignees \ - --jq --arg user "$user" ' - .assignees[].login | select(. == $user) - ' | grep -q . + --jq '.assignees[].login' | grep -Fxq "$user" } ####################################### @@ -215,8 +209,20 @@ $COMMENT_MARKER_PREFIX @$user" fi if is_assigned "$user"; then - log "Unassigning @$user." - gh issue edit "$ISSUE_NUMBER" --repo "$REPO" --remove-assignee "$user" + log "Unassigning @$user ..." + json_body="{\"assignees\": [\"$user\"]}" + response=$( + gh api \ + --method DELETE \ + "repos/$REPO/issues/$ISSUE_NUMBER/assignees" \ + --input <(echo "$json_body") \ + || echo "error" + ) + if [[ "$response" != "error" ]]; then + log "Successfully unassigned @$user." + else + log "Failed to unassign @$user." + fi else log "User @$user already unassigned. Skipping." 
fi @@ -248,4 +254,4 @@ else while read -r user; do [[ -n "$user" ]] && check_user "$user" done <<< "$ASSIGNEES" -fi \ No newline at end of file +fi diff --git a/.github/scripts/bot-beginner-assign-on-comment.js b/.github/scripts/bot-beginner-assign-on-comment.js index 3ca18a463..77fae5508 100644 --- a/.github/scripts/bot-beginner-assign-on-comment.js +++ b/.github/scripts/bot-beginner-assign-on-comment.js @@ -56,6 +56,44 @@ Parameters: const fs = require("fs"); const SPAM_LIST_PATH = ".github/spam-list.txt"; +const REQUIRED_GFI_COUNT = 1; +const GFI_LABEL = 'Good First Issue'; +const BEGINNER_GUARD_MARKER = ''; + +function isSafeSearchToken(value) { + return typeof value === 'string' && /^[a-zA-Z0-9._/-]+$/.test(value); +} + +async function countCompletedGfiIssues(github, owner, repo, username) { + if ( + !isSafeSearchToken(owner) || + !isSafeSearchToken(repo) || + !isSafeSearchToken(username) + ) { + return null; + } + + const searchQuery = [ + `repo:${owner}/${repo}`, + `label:"${GFI_LABEL}"`, + 'is:issue', + 'is:closed', + `assignee:${username}`, + ].join(' '); + + const result = await github.graphql( + ` + query ($searchQuery: String!) { + search(type: ISSUE, query: $searchQuery) { + issueCount + } + } + `, + { searchQuery } + ); + + return result?.search?.issueCount ?? 0; +} module.exports = async ({ github, context }) => { try { @@ -148,23 +186,91 @@ module.exports = async ({ github, context }) => { // 4. Logic Branch if (isAssignCommand) { - // --- ASSIGNMENT LOGIC --- - if (issue.assignees && issue.assignees.length > 0) { - const currentAssignee = issue.assignees[0].login; - console.log(`[Beginner Bot] Issue #${issue.number} is already assigned. 
Ignoring /assign command.`); + const completedGfiCount = await countCompletedGfiIssues( + github, + repo.owner.login, + repo.name, + commenter + ); - // Fix 4: Granular Try/Catch for Comment API + console.log("[Beginner Bot] Completed GFI count:",{ + commenter, + completedGfiCount, + }) + + if (completedGfiCount === null) { + console.log("[Beginner Bot] Skipping GFI guard due to API error."); + } else if (completedGfiCount < REQUIRED_GFI_COUNT) { + + let allComments = []; try { + allComments = await github.paginate( + github.rest.issues.listComments, + { + owner: repo.owner.login, + repo: repo.name, + issue_number: issue.number, + per_page: 100, + } + ); + } catch (error) { + console.error("[Beginner Bot] Failed to fetch comments for GFI guard:", { + issue: issue.number, + commenter, + message: error.message, + }); + return; + } + + const guardAlreadyPosted = allComments.some((c) => + c.body?.includes(BEGINNER_GUARD_MARKER) + ); + + if (!guardAlreadyPosted) { + try{ + await github.rest.issues.createComment({ + owner: repo.owner.login, + repo: repo.name, + issue_number: issue.number, + body: `${BEGINNER_GUARD_MARKER} +👋 Hi @${commenter}! Thanks for your interest in contributing 💡 + +Before taking on a **beginner** issue, we ask contributors to complete at least one **Good First Issue** to get familiar with the workflow. + +👉 [Find a Good First Issue here](https://github.com/${repo.owner.login}/${repo.name}/issues?q=is%3Aissue+is%3Aopen+label%3A%22Good+First+Issue%22+no%3Aassignee) + +Please try a GFI first, then come back — we’ll be happy to assign this! 😊`, + }); + console.log("[Beginner Bot] GFI guard comment posted."); + } catch(error){ + console.error("[Beginner Bot] Failed to post GFI guard comment:",{ + issue: issue.number, + commenter, + message: error.message, + }); + } + } + return; + } + + // --- ASSIGNMENT LOGIC --- + if (issue.assignees && issue.assignees.length > 0) { + try{ + const currentAssignee = issue.assignees[0]?.login ?? 
"another contributor"; await github.rest.issues.createComment({ owner: repo.owner.login, repo: repo.name, issue_number: issue.number, - body: `👋 Hi @${commenter}, thanks for your interest! This issue is already assigned to @${currentAssignee}, but we'd love your help on another one. You can find more "beginner" issues [here](https://github.com/hiero-ledger/hiero-sdk-python/issues?q=is%3Aissue%20state%3Aopen%20label%3Abeginner%20no%3Aassignee).`, + body: `👋 Hi @${commenter}, thanks for your interest! This issue is already assigned to @${currentAssignee}, but we'd love your help on another one. You can find more "beginner" issues [here](https://github.com/${repo.owner.login}/${repo.name}/issues?q=is%3Aissue+is%3Aopen+label%3Abeginner+no%3Aassignee).`, }); } catch (error) { - console.error(`[Beginner Bot] Failed to post already-assigned comment: ${error.message}`); + console.error("[Beginner Bot] Failed to post already-assigned message:", { + issue: issue.number, + commenter, + message: error.message, + }); } - return; // Exit after warning + return; } // Block spam users from beginner issues diff --git a/.github/scripts/bot-gfi-assign-on-comment.js b/.github/scripts/bot-gfi-assign-on-comment.js index b64f79b15..9a8d35b2b 100644 --- a/.github/scripts/bot-gfi-assign-on-comment.js +++ b/.github/scripts/bot-gfi-assign-on-comment.js @@ -146,28 +146,41 @@ async function isRepoCollaborator({ github, owner, repo, username }) { } try { - await github.rest.repos.checkCollaborator({ + const response = await github.rest.repos.getCollaboratorPermissionLevel({ owner, repo, username, }); - return true; // 204 = collaborator + + const permission = response?.data?.permission; + + const isTeamMember = + permission === 'admin' || + permission === 'write' || + permission === 'maintain' || + permission === 'read'; + + console.log('[gfi-assign] isRepoCollaborator:', { + username, + permission, + isTeamMember, + }); + + return isTeamMember; } catch (error) { - if (error?.status === 404 || 
isPermissionFailure(error)) { - if (isPermissionFailure(error)) { - console.log( - '[gfi-assign] isRepoCollaborator: insufficient permissions; treating as non-collaborator', - { owner, repo, username, status: error.status } - ); - } + if (isPermissionFailure(error) || error?.status === 404) { + console.log( + '[gfi-assign] isRepoCollaborator: no permission / not collaborator', + { username, status: error.status } + ); return false; } - throw error; // unexpected error + throw error; } - } + /// START OF SCRIPT /// module.exports = async ({ github, context }) => { try { @@ -220,7 +233,7 @@ module.exports = async ({ github, context }) => { username, }); - if (isTeamMember) { + if (isTeamMember) { console.log('[gfi-assign] Skip reminder: commenter is collaborator'); return; } @@ -397,4 +410,4 @@ module.exports = async ({ github, context }) => { }); throw error; } -}; \ No newline at end of file +}; diff --git a/.github/scripts/bot-inactivity-unassign.sh b/.github/scripts/bot-inactivity-unassign.sh index ecb43babb..2726c8a25 100755 --- a/.github/scripts/bot-inactivity-unassign.sh +++ b/.github/scripts/bot-inactivity-unassign.sh @@ -43,6 +43,42 @@ parse_ts() { fi } +# Check for /working command from the specific user within the last X days +has_recent_working_command() { + local issue_num="$1" + local user="$2" + local days_threshold="$3" + + local NOW_TS=$(date +%s) + local cutoff_ts=$((NOW_TS - (days_threshold * 86400))) + local cutoff_iso + if date --version >/dev/null 2>&1; then + cutoff_iso=$(date -u -d "@$cutoff_ts" +"%Y-%m-%dT%H:%M:%SZ") + else + cutoff_iso=$(date -u -r "$cutoff_ts" +"%Y-%m-%dT%H:%M:%SZ") + fi + # Fetch recent comments only, filter for user and "/working" string + local working_comments + working_comments=$(gh api "repos/$REPO/issues/$issue_num/comments?since=$cutoff_iso" \ + --jq ".[] | select(.user.login == \"$user\") | select(.body | test(\"(^|\\\\s)/working(\\\\s|$)\"; \"i\")) | .created_at") + + if [[ -z "$working_comments" ]]; then + return 
1 # False + fi + + # Double check timestamp just in case API returned older items + + for created_at in $working_comments; do + local comment_ts + comment_ts=$(parse_ts "$created_at") + if (( comment_ts >= cutoff_ts )); then + return 0 # True + fi + done + + return 1 # False +} + # Quick gh availability/auth checks if ! command -v gh >/dev/null 2>&1; then echo "ERROR: gh CLI not found. Install it and ensure it's on PATH." @@ -93,7 +129,7 @@ for ISSUE in $ISSUES; do ASSIGNED_AT=$(echo "$ASSIGN_EVENT_JSON" | jq -r '.created_at // empty') ASSIGN_SOURCE="assignment_event" else - # FIX: Do not fallback to issue creation date + ASSIGNED_AT="" ASSIGN_SOURCE="not_found" fi @@ -102,13 +138,18 @@ for ISSUE in $ISSUES; do ASSIGNED_TS=$(parse_ts "$ASSIGNED_AT") ASSIGNED_AGE_DAYS=$(( (NOW_TS - ASSIGNED_TS) / 86400 )) else - # Safety valve: if assignment event is missing, skip checking to prevent false positives + echo " [WARN] Could not find 'assigned' event in timeline. Skipping inactivity check for safety." continue fi echo " [INFO] Assignment source: $ASSIGN_SOURCE" - echo " [INFO] Assigned at: ${ASSIGNED_AT:-(unknown)} (~${ASSIGNED_AGE_DAYS} days ago)" + echo " [INFO] Assigned at: ${ASSIGNED_AT:-(unknown)} (~${ASSIGNED_AGE_DAYS} days ago)" + + if has_recent_working_command "$ISSUE" "$USER" "$DAYS"; then + echo " [SKIP] User @$USER posted '/working' recently. Resetting timer." + continue + fi # Determine PRs cross-referenced from the same repo PR_NUMBERS=$(jq -r --arg repo "$REPO" ' @@ -134,7 +175,11 @@ Hi @$USER, this is InactivityBot 👋 You were assigned to this issue **${ASSIGNED_AGE_DAYS} days** ago, and there is currently no open pull request linked to it. To keep the backlog available for active contributors, I'm unassigning you for now. -If you'd like to continue working on this later, feel free to get re-assigned or comment here and we'll gladly assign it back to you. 🙂 +If you're no longer interested, no action is needed. 
+ +**Tip:** You can comment \`/unassign\` on any issue to proactively step away before this bot kicks in. + +If you'd like to continue working on this later, feel free to comment \`/assign\` on the issue to get re-assigned, and open a new PR when you're ready. 🚀 EOF ) gh issue comment "$ISSUE" --repo "$REPO" --body "$MESSAGE" || echo "WARN: couldn't post comment (gh error)" @@ -167,6 +212,8 @@ EOF continue fi + + COMMITS_JSON=$(gh api "repos/$REPO/pulls/$PR_NUM/commits" --paginate 2>/dev/null || echo "[]") LAST_TS_STR=$(jq -r 'last? | (.commit.committer.date // .commit.author.date) // empty' <<<"$COMMITS_JSON" 2>/dev/null || echo "") if [[ -n "$LAST_TS_STR" ]]; then @@ -188,7 +235,11 @@ Hi @$USER, this is InactivityBot 👋 This pull request has had no new commits for **${PR_AGE_DAYS} days**, so I'm closing it and unassigning you from the linked issue to keep the backlog healthy. -You're very welcome to open a new PR or ask to be re-assigned when you're ready to continue working on this. 🚀 +If you're no longer interested, no action is needed. + +**Tip:** You can comment \`/unassign\` on any issue to proactively step away before this bot kicks in. + +If you'd like to continue working on this later, feel free to comment \`/assign\` on the issue to get re-assigned, and open a new PR when you're ready. 
🚀 EOF ) gh pr comment "$PR_NUM" --repo "$REPO" --body "$MESSAGE" || echo "WARN: couldn't comment on PR" @@ -214,4 +265,4 @@ done echo "------------------------------------------------------------" echo " Unified Inactivity Bot Complete" echo " DRY_RUN: $DRY_RUN" -echo "------------------------------------------------------------" \ No newline at end of file +echo "------------------------------------------------------------" diff --git a/.github/scripts/bot-issue-reminder-no-pr.sh b/.github/scripts/bot-issue-reminder-no-pr.sh index 9a89057af..b081c331a 100644 --- a/.github/scripts/bot-issue-reminder-no-pr.sh +++ b/.github/scripts/bot-issue-reminder-no-pr.sh @@ -26,9 +26,9 @@ fi echo "------------------------------------------------------------" echo " Issue Reminder Bot (No PR)" -echo " Repo: $REPO" -echo " Threshold: $DAYS days" -echo " Dry Run: $DRY_RUN" +echo " Repo: $REPO" +echo " Threshold: $DAYS days" +echo " Dry Run: $DRY_RUN" echo "------------------------------------------------------------" echo @@ -44,6 +44,41 @@ parse_ts() { fi } +# Check for /working command from the specific user within the last X days +has_recent_working_command() { + local issue_num="$1" + local user="$2" + local days_threshold="$3" + + local cutoff_ts=$((NOW_TS - (days_threshold * 86400))) + local cutoff_iso + if date --version >/dev/null 2>&1; then + cutoff_iso=$(date -u -d "@$cutoff_ts" +"%Y-%m-%dT%H:%M:%SZ") + else + cutoff_iso=$(date -u -r "$cutoff_ts" +"%Y-%m-%dT%H:%M:%SZ") + fi + # Fetch recent comments only, filter for user and "/working" string + local working_comments + working_comments=$(gh api "repos/$REPO/issues/$issue_num/comments?since=$cutoff_iso" \ + --jq ".[] | select(.user.login == \"$user\") | select(.body | test(\"(^|\\\\s)/working(\\\\s|$)\"; \"i\")) | .created_at") + + if [[ -z "$working_comments" ]]; then + return 1 # False + fi + + # The 'since' parameter is an optimization, but the API may still return comments + # updated since the cutoff, not just 
created. We still need to check the create time. + for created_at in $working_comments; do + local comment_ts + comment_ts=$(parse_ts "$created_at") + if (( comment_ts >= cutoff_ts )); then + return 0 # True + fi + done + + return 1 # False +} + # Fetch open ISSUES (not PRs) that have assignees ALL_ISSUES_JSON=$(gh api "repos/$REPO/issues" \ --paginate \ @@ -82,6 +117,20 @@ echo "$ALL_ISSUES_JSON" | jq -c '.' | while read -r ISSUE_JSON; do continue fi + # Immunity Check: If ANY assignee has said /working, we skip the reminder for the whole issue + SKIP_REMINDER=false + for USER in $ASSIGNEES; do + if has_recent_working_command "$ISSUE" "$USER" "$DAYS"; then + echo "[SKIP] User @$USER posted '/working' recently. Skipping reminder." + SKIP_REMINDER=true + break + fi + done + + if [ "$SKIP_REMINDER" = "true" ]; then + continue + fi + # Get assignment time (use the last assigned event) ASSIGN_TS=$(gh api "repos/$REPO/issues/$ISSUE/events" \ --jq ".[] | select(.event==\"assigned\") | .created_at" \ @@ -142,7 +191,7 @@ Hi ${ASSIGNEE_MENTIONS} 👋 This issue has been assigned but no pull request has been created yet. Are you still planning on working on it? -If you are, please create a draft PR linked to this issue so we know you are working on it. +If you are, please create a draft PR linked to this issue or comment \`/working\` to let us know. If you’re no longer able to work on this issue, you can comment \`/unassign\` to release it. From the Python SDK Team" @@ -159,4 +208,4 @@ done echo "------------------------------------------------------------" echo " Issue Reminder Bot (No PR) complete." 
-echo "------------------------------------------------------------" \ No newline at end of file +echo "------------------------------------------------------------" diff --git a/.github/scripts/bot-mentor-assignment.js b/.github/scripts/bot-mentor-assignment.js index 358d27954..2e9fd0d94 100644 --- a/.github/scripts/bot-mentor-assignment.js +++ b/.github/scripts/bot-mentor-assignment.js @@ -105,25 +105,57 @@ async function isNewContributor(github, owner, repo, login) { } function buildComment({ mentee, mentor, owner, repo }) { - const repoUrl = `https://github.com/${owner}/${repo}`; - - return `${COMMENT_MARKER} -👋 Hi @${mentee}, welcome to the Hiero Python SDK community! - -You've been assigned this Good First Issue, and today’s on-call mentor from ${MENTOR_TEAM_ALIAS} is @${mentor}. They're here to help you land a great first contribution. - -**How to get started** -- Review the issue description and any linked docs -- Share updates early and ask @${mentor} anything right here -- Keep the feedback loop short so we can support you quickly - -Need more backup? ${SUPPORT_TEAM_ALIAS} is also on standby to cheer you on. - + const repoUrl = owner && repo ? `https://github.com/${owner}/${repo}` : "https://github.com/hiero-ledger/hiero-sdk-python"; + + return `${COMMENT_MARKER}👋 Hi @${mentee}, welcome to the Hiero Python SDK community! +You've been assigned this **Good First Issue**. Your on-call mentor today from ${MENTOR_TEAM_ALIAS} is **@${mentor}**, and the **Good First Issue Support Team** is **${SUPPORT_TEAM_ALIAS}**. +We’re here to help you get your first PR merged successfully 🚀 + +--- +### Step 1: Read the task +- Open the issue description, carefully read the requirements and workflow +- Read any linked documentation +- Make sure you understand the expected outcome +If anything is unclear, ask your mentor **before** writing code. 
+ +--- +### Step 2: Tell us your plan (in this thread) +Reply here with: +- What you think the task is +- What you plan to change to solve the issue requirement and no more +- Any questions you have +This means we can guide you early on, helping you to have a more stress free experience + +--- +### Step 3: Get the right kind of help +Finishing your first PR can be really hard. We are here to help you - please ask us! + +🛠 **Good First Issue Support Team (${SUPPORT_TEAM_ALIAS})** +Use them for **workflow and GitHub issues**, such as: +- Failing CI checks +- Commit signing problems +- Merge conflicts +- Git/GitHub errors +They can also jump on a call if needed. + +🐍 **Your Mentor (@${mentor})** +Use your mentor for **Python and code guidance**, such as: +- Which files to edit +- How to structure your solution +- Docstring and code style questions +- Making sure your PR meets the issue requirements + +💬 **Discord (for fast help)** +Guide: [Join the Python SDK Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md) +Use Discord when you need **immediate answers** from the community, like: +- What docs to read +- How to unblock yourself quickly +- Clarifying next steps in real time + +--- **Mentor:** @${mentor} **Mentee:** @${mentee} - If you're enjoying the SDK, consider ⭐️ [starring the repository](${repoUrl}) so it's easy to find later. - Happy building! 
— Python SDK Team`; } diff --git a/.github/scripts/bot-merge-conflict.js b/.github/scripts/bot-merge-conflict.js index befa9c269..cc6031d25 100644 --- a/.github/scripts/bot-merge-conflict.js +++ b/.github/scripts/bot-merge-conflict.js @@ -44,8 +44,7 @@ module.exports = async ({ github, context, core }) => { return; } - const body = `Hi, this is MergeConflictBot.\nYour pull request cannot be merged because it contains **merge conflicts**.\n\nPlease resolve these conflicts locally and push the changes.\n\nTo assist you, please read:\n- [Resolving Merge Conflicts](https://github.com/${owner}/${repo}/blob/main/docs/sdk_developers/merge_conflicts.md)\n- [Rebasing Guide](https://github.com/${owner}/${repo}/blob/main/docs/sdk_developers/rebasing.md)\n\nThank you for contributing!\n`; - + const body = `Hi, this is MergeConflictBot.\nYour pull request cannot be merged because it contains **merge conflicts**.\n\nPlease resolve these conflicts locally and push the changes.\n\n### Quick Fix for CHANGELOG.md Conflicts\nIf your conflict is only in **CHANGELOG.md**, you can resolve it easily using the [GitHub web editor](https://docs.github.com/en/codespaces/the-githubdev-web-based-editor):\n1. Click on the "Resolve conflicts" button in the PR\n2. Accept both changes (keep both changelog entries)\n3. Click "Mark as resolved"\n4. 
Commit the merge\n\nFor all other merge conflicts, please read:\n- [Resolving Merge Conflicts](https://github.com/${owner}/${repo}/blob/main/docs/sdk_developers/merge_conflicts.md)\n- [Rebasing Guide](https://github.com/${owner}/${repo}/blob/main/docs/sdk_developers/rebasing.md)\n\nThank you for contributing!\n`; if (dryRun) { console.log(`[DRY RUN] Would post comment to PR #${prNumber}: ${body}`); return; diff --git a/.github/scripts/bot-next-issue-recommendation.js b/.github/scripts/bot-next-issue-recommendation.js new file mode 100644 index 000000000..1e7194ab9 --- /dev/null +++ b/.github/scripts/bot-next-issue-recommendation.js @@ -0,0 +1,215 @@ +module.exports = async ({ github, context, core }) => { + const { payload } = context; + + // Get PR information from automatic pull_request_target trigger + let prNumber = payload.pull_request?.number; + let prBody = payload.pull_request?.body || ''; + + // Manual workflow_dispatch is no longer supported - inputs were removed + // Only automatic triggers from merged PRs will work + const repoOwner = context.repo.owner; + const repoName = context.repo.repo; + + if (!prNumber) { + core.info('No PR number found, skipping'); + return; + } + + core.info(`Processing PR #${prNumber}`); + + // Parse PR body to find linked issues + const MAX_PR_BODY_LENGTH = 50000; // Reasonable limit for PR body + if (prBody.length > MAX_PR_BODY_LENGTH) { + core.warning(`PR body exceeds ${MAX_PR_BODY_LENGTH} characters, truncating for parsing`); + prBody = prBody.substring(0, MAX_PR_BODY_LENGTH); + } + const issueRegex = /(fixes|closes|resolves|fix|close|resolve)\s+(?:[\w-]+\/[\w-]+)?#(\d+)/gi; + const matches = [...prBody.matchAll(issueRegex)]; + + if (matches.length === 0) { + core.info('No linked issues found in PR body'); + return; + } + + // Get the first linked issue number + const issueNumber = parseInt(matches[0][2]); + core.info(`Found linked issue #${issueNumber}`); + + try { + // Fetch issue details + const { data: issue } = await 
github.rest.issues.get({ + owner: repoOwner, + repo: repoName, + issue_number: issueNumber, + }); + + // Normalize and check issue labels (case-insensitive) + const labelNames = issue.labels.map(label => label.name.toLowerCase()); + const labelSet = new Set(labelNames); + core.info(`Issue labels: ${labelNames.join(', ')}`); + + // Determine issue difficulty level + const difficultyLevels = { + beginner: labelSet.has('beginner'), + goodFirstIssue: labelSet.has('good first issue'), + intermediate: labelSet.has('intermediate'), + advanced: labelSet.has('advanced'), + }; + + // Skip if intermediate or advanced + if (difficultyLevels.intermediate || difficultyLevels.advanced) { + core.info('Issue is intermediate or advanced level, skipping recommendation'); + return; + } + + // Only proceed for Good First Issue or beginner issues + if (!difficultyLevels.goodFirstIssue && !difficultyLevels.beginner) { + core.info('Issue is not a Good First Issue or beginner issue, skipping'); + return; + } + + let recommendedIssues = []; + let recommendedLabel = null; + let isFallback = false; + let recommendationScope = 'repo'; + + recommendedIssues = await searchIssues(github, core, repoOwner, repoName, 'beginner'); + recommendedLabel = 'Beginner'; + + if (recommendedIssues.length === 0) { + isFallback = true; + recommendedIssues = await searchIssues(github, core, repoOwner, repoName, 'good first issue'); + recommendedLabel = 'Good First Issue'; + } + + if (recommendedIssues.length === 0) { + recommendationScope = 'org'; + recommendedLabel = 'Good First Issue'; + recommendedIssues = await github.rest.search.issuesAndPullRequests({ + q: `org:hiero-ledger type:issue state:open label:"good first issue" no:assignee`, + per_page: 6, + }).then(res => res.data.items); + } + + // Remove the issue they just solved + recommendedIssues = recommendedIssues.filter(i => i.number !== issueNumber); + + // Generate and post comment + const completedLabel = difficultyLevels.goodFirstIssue ? 
'Good First Issue' : 'Beginner'; + const completedLabelText = completedLabel === 'Beginner' ? 'Beginner issue' : completedLabel; + const recommendationMeta = { + completedLabelText, + recommendedLabel, + isFallback, + recommendationScope, + }; + await generateAndPostComment(github, context, core, prNumber, recommendedIssues, recommendationMeta); + + } catch (error) { + core.setFailed(`Error processing issue #${issueNumber}: ${error.message}`); + } +}; + +async function searchIssues(github, core, owner, repo, label) { + try { + const query = `repo:${owner}/${repo} type:issue state:open label:"${label}" no:assignee`; + core.info(`Searching for issues with query: ${query}`); + + const { data: searchResult } = await github.rest.search.issuesAndPullRequests({ + q: query, + per_page: 6, + }); + + core.info(`Found ${searchResult.items.length} issues with label "${label}"`); + return searchResult.items; + } catch (error) { + core.warning(`Error searching for issues with label "${label}": ${error.message}`); + return []; + } +} + +async function generateAndPostComment(github, context, core, prNumber, recommendedIssues, { completedLabelText, recommendedLabel, isFallback, recommendationScope }) { + const marker = ''; + + // Build comment content + let comment = `${marker}\n\n🎉 **Nice work completing a ${completedLabelText}!**\n\n`; + comment += `Thank you for your contribution to the Hiero Python SDK! 
We're excited to have you as part of our community.\n\n`; + + if (recommendedIssues.length > 0) { + if (recommendationScope === 'org') { + comment += `Here are some **Good First Issues across the Hiero organization** you might be interested in working on next:\n\n`; + } else if (isFallback) { + comment += `Here are some **${recommendedLabel}** issues at a similar level you might be interested in working on next:\n\n`; + } else { + comment += `Here are some issues labeled **${recommendedLabel}** you might be interested in working on next:\n\n`; + } + + // Sanitize title: escape markdown link syntax and special characters + const sanitizeTitle = (title) => title + .replace(/\[/g, '\\[') + .replace(/\]/g, '\\]') + .replace(/\(/g, '\\(') + .replace(/\)/g, '\\)'); + + recommendedIssues.slice(0, 5).forEach((issue, index) => { + comment += `${index + 1}. [${sanitizeTitle(issue.title)}](${issue.html_url})\n`; + if (issue.body && issue.body.length > 0) { + // Sanitize: strip HTML, normalize whitespace, escape markdown links + const sanitized = issue.body + .replace(/<[^>]*>/g, '') // Remove HTML tags + .replace(/\[([^\]]*)\]\([^)]*\)/g, '$1') // Remove markdown links, keep text + .replace(/\s+/g, ' ') // Normalize whitespace + .trim(); + const description = sanitized.substring(0, 150); + comment += ` ${description}${sanitized.length > 150 ? '...' : ''}\n\n`; + } else { + comment += ` *No description available*\n\n`; + } + }); + } else { + comment += `There are currently no open issues available at or near the ${completedLabelText} level in this repository.\n\n`; + const orgLabel = recommendedLabel === 'Beginner' ? 
'beginner' : 'good first issue'; + const orgLabelQuery = encodeURIComponent(`label:"${orgLabel}"`); + comment += `You can check out ${recommendedLabel.toLowerCase()} issues across the entire Hiero organization: ` + + `[Hiero ${recommendedLabel} Issues](https://github.com/issues?q=org%3Ahiero-ledger+type%3Aissue+state%3Aopen+${orgLabelQuery})\n\n`; + } + + comment += `🌟 **Stay connected with the project:**\n`; + comment += `- ⭐ [Star this repository](https://github.com/${context.repo.owner}/${context.repo.repo}) to show your support\n`; + comment += `- 👀 [Watch this repository](https://github.com/${context.repo.owner}/${context.repo.repo}/watchers) to get notified of new issues and releases\n\n`; + + comment += `We look forward to seeing more contributions from you! If you have any questions, feel free to ask in our [Discord community](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md).\n\n`; + comment += `From the Hiero Python SDK Team 🚀`; + + // Check for existing comment + try { + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + }); + + const existingComment = comments.find(comment => comment.body.includes(marker)); + + if (existingComment) { + core.info('Comment already exists, skipping'); + return; + } + } catch (error) { + core.warning(`Error checking existing comments: ${error.message}`); + } + + // Post the comment + try { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body: comment, + }); + + core.info(`Successfully posted comment to PR #${prNumber}`); + } catch (error) { + core.setFailed(`Error posting comment: ${error.message}`); + } +} diff --git a/.github/scripts/bot-office-hours.sh b/.github/scripts/bot-office-hours.sh index 22ae05186..b536ad377 100755 --- a/.github/scripts/bot-office-hours.sh +++ b/.github/scripts/bot-office-hours.sh @@ -82,6 +82,7 
@@ The Python SDK Team EOF ) + echo "$PR_DATA" | jq -r ' sort_by(.author.login) @@ -89,7 +90,7 @@ echo "$PR_DATA" | | .[] | max_by(.createdAt) | ((.author.login | endswith("[bot]")) as $is_bot - | "\(.number) \(.author.login) \($is_bot)" + | "\(.number) \(.author.login) \($is_bot)") ' | while read -r PR_NUM AUTHOR IS_BOT; do if [[ "$IS_BOT" == "true" ]]; then diff --git a/.github/scripts/bot-pr-missing-linked-issue.js b/.github/scripts/bot-pr-missing-linked-issue.js index 1c8430636..51f8fe45d 100644 --- a/.github/scripts/bot-pr-missing-linked-issue.js +++ b/.github/scripts/bot-pr-missing-linked-issue.js @@ -35,7 +35,7 @@ module.exports = async ({ github, context }) => { } const body = prData.body || ""; - const regex = /\bFixes\s*:?\s*(#\d+)(\s*,\s*#\d+)*/i; + const regex = /\b(Fixes|Closes|Resolves)\s*:?\s*(#\d+)(\s*,\s*#\d+)*/i; const comments = await github.rest.issues.listComments({ owner: context.repo.owner, diff --git a/.github/scripts/bot-unassign-on-comment.js b/.github/scripts/bot-unassign-on-comment.js index 0dfb8b197..4349d8361 100644 --- a/.github/scripts/bot-unassign-on-comment.js +++ b/.github/scripts/bot-unassign-on-comment.js @@ -20,16 +20,13 @@ Safeguards: function isValidUnassignContext(issue, comment) { if (!issue?.number || issue.pull_request) return false; if (!comment?.body || !comment?.user?.login) return false; - if (comment.user.type === 'Bot') return false; - if (issue.state !== 'open') return false; + if (comment.user.type === "Bot") return false; + if (issue.state !== "open") return false; return true; } function commentRequestsUnassign(body) { - return ( - typeof body === 'string' && - /(^|\s)\/unassign(\s|$)/i.test(body) - ); + return typeof body === "string" && /(^|\s)\/unassign(\s|$)/i.test(body); } function buildUnassignMarker(username) { @@ -37,7 +34,7 @@ function buildUnassignMarker(username) { } function isCurrentAssignee(issue, username) { - return issue.assignees?.some(a => a.login === username); + return 
issue.assignees?.some((a) => a.login === username); } module.exports = async ({ github, context }) => { @@ -45,7 +42,7 @@ module.exports = async ({ github, context }) => { const { issue, comment } = context.payload; const { owner, repo } = context.repo; - console.log('[unassign] Payload snapshot:', { + console.log("[unassign] Payload snapshot:", { issueNumber: issue?.number, commenter: comment?.user?.login, commenterType: comment?.user?.type, @@ -54,59 +51,56 @@ module.exports = async ({ github, context }) => { // Basic validation if (!isValidUnassignContext(issue, comment)) { - console.log('[unassign] Exit: invalid unassign context', { + console.log("[unassign] Exit: invalid unassign context", { issueNumber: issue?.number, commenter: comment?.user?.login, issueState: issue?.state, - isBot: comment?.user?.type === 'Bot', + isBot: comment?.user?.type === "Bot", }); return; } if (!commentRequestsUnassign(comment.body)) { - console.log('[unassign] Exit: comment does not request unassign'); + console.log("[unassign] Exit: comment does not request unassign"); return; } const username = comment.user.login; const issueNumber = issue.number; - console.log('[unassign] Unassign command detected by', username); + console.log("[unassign] Unassign command detected by", username); // Check if user is currently assigned if (!isCurrentAssignee(issue, username)) { - console.log('[unassign] Exit: commenter is not an assignee', { + console.log("[unassign] Exit: commenter is not an assignee", { requester: username, - currentAssignees: issue.assignees?.map(a => a.login), + currentAssignees: issue.assignees?.map((a) => a.login), }); return; } // Fetch comments to check for prior unassign - const comments = await github.paginate( - github.rest.issues.listComments, - { - owner, - repo, - issue_number: issueNumber, - per_page: 100, - } - ); + const comments = await github.paginate(github.rest.issues.listComments, { + owner, + repo, + issue_number: issueNumber, + per_page: 100, + }); const 
marker = buildUnassignMarker(username); - const alreadyUnassigned = comments.some(c => - typeof c.body === 'string' && c.body.includes(marker) + const alreadyUnassigned = comments.some( + (c) => typeof c.body === "string" && c.body.includes(marker), ); if (alreadyUnassigned) { - console.log('[unassign] Exit: unassign already requested previously', { + console.log("[unassign] Exit: unassign already requested previously", { requester: username, issueNumber, }); return; } - console.log('[unassign] Proceeding to unassign user', { + console.log("[unassign] Proceeding to unassign user", { requester: username, issueNumber, }); @@ -119,21 +113,24 @@ module.exports = async ({ github, context }) => { assignees: [username], }); - // Add hidden marker to track unassign + const confirmationMessage = + `✅ **@${username}, you’ve been unassigned from this issue.**\n\n` + + `Thanks for letting us know! If you’d like to work on something else, ` + + `feel free to browse our open issues.`; + await github.rest.issues.createComment({ owner, repo, issue_number: issueNumber, - body: marker, + body: `${marker}\n\n${confirmationMessage}`, }); - console.log('[unassign] Unassign completed successfully', { + console.log("[unassign] Unassign completed successfully", { requester: username, issueNumber, - }); - + }); } catch (error) { - console.error('[unassign] Error:', { + console.error("[unassign] Error:", { message: error.message, status: error.status, issueNumber: context.payload?.issue?.number, diff --git a/.github/scripts/bot-verified-commits.js b/.github/scripts/bot-verified-commits.js new file mode 100644 index 000000000..eccc8ce9e --- /dev/null +++ b/.github/scripts/bot-verified-commits.js @@ -0,0 +1,320 @@ +// .github/scripts/bot-verified-commits.js +// Verifies that all commits in a pull request are GPG-signed. +// Posts a one-time VerificationBot comment if unverified commits are found. 
+ +// Sanitizes string input to prevent injection (uses Unicode property escape per Biome lint) +function sanitizeString(input) { + if (typeof input !== 'string') return ''; + return input.replace(/\p{Cc}/gu, '').trim(); +} + +// Escapes markdown special characters and breaks @mentions to prevent injection +// Required per CodeRabbit review: commit messages are user-controlled and can cause +// markdown injection or unwanted @mentions that spam teams +function sanitizeMarkdown(input) { + return sanitizeString(input) + .replace(/[\x60*_~[\]()]/g, '\\$&') // Escape markdown special chars (backtick via hex) + .replace(/@/g, '@\u200b'); // Break @mentions with zero-width space +} + + +// Validates URL format and returns fallback if invalid +function sanitizeUrl(input, fallback) { + const cleaned = sanitizeString(input); + return /^https?:\/\/[^\s]+$/i.test(cleaned) ? cleaned : fallback; +} + +// Configuration via environment variables (sanitized) +const CONFIG = { + BOT_NAME: sanitizeString(process.env.BOT_NAME) || 'VerificationBot', + BOT_LOGIN: sanitizeString(process.env.BOT_LOGIN) || 'github-actions', + COMMENT_MARKER: sanitizeString(process.env.COMMENT_MARKER) || '', + SIGNING_GUIDE_URL: sanitizeUrl( + process.env.SIGNING_GUIDE_URL, + 'https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md' + ), + README_URL: sanitizeUrl( + process.env.README_URL, + 'https://github.com/hiero-ledger/hiero-sdk-python/blob/main/README.md' + ), + DISCORD_URL: sanitizeUrl( + process.env.DISCORD_URL, + 'https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md' + ), + TEAM_NAME: sanitizeString(process.env.TEAM_NAME) || 'Hiero Python SDK Team', + MAX_PAGES: (() => { + const parsed = Number.parseInt(process.env.MAX_PAGES ?? '5', 10); + return Number.isInteger(parsed) && parsed > 0 ? 
parsed : 5; + })(), + DRY_RUN: process.env.DRY_RUN === 'true', +}; + +// Validates PR number is a positive integer +function validatePRNumber(prNumber) { + const num = parseInt(prNumber, 10); + return Number.isInteger(num) && num > 0 ? num : null; +} + +// Fetches commits with bounded pagination and counts unverified ones +async function getCommitVerificationStatus(github, owner, repo, prNumber) { + console.log(`[${CONFIG.BOT_NAME}] Fetching commits for PR #${prNumber}...`); + + const commits = []; + let page = 0; + let truncated = false; + + try { + for await (const response of github.paginate.iterator( + github.rest.pulls.listCommits, + { owner, repo, pull_number: prNumber, per_page: 100 } + )) { + commits.push(...response.data); + if (++page >= CONFIG.MAX_PAGES) { + truncated = true; + console.warn(`[${CONFIG.BOT_NAME}] Reached MAX_PAGES (${CONFIG.MAX_PAGES}) limit`); + break; + } + } + } catch (error) { + console.error(`[${CONFIG.BOT_NAME}] Failed to list commits`, { + owner, + repo, + prNumber, + status: error?.status, + message: error?.message, + }); + throw error; + } + + const unverifiedCommits = commits.filter( + commit => commit.commit?.verification?.verified !== true + ); + + console.log(`[${CONFIG.BOT_NAME}] Found ${commits.length} total, ${unverifiedCommits.length} unverified`); + + // Fail-closed: if truncated and no unverified found, treat as potentially unverified + const unverifiedCount = truncated && unverifiedCommits.length === 0 + ? 
1 + : unverifiedCommits.length; + + return { + total: commits.length, + unverified: unverifiedCount, + unverifiedCommits, + truncated, + }; +} + +// Checks if bot already posted a verification comment (marker-based detection) +// Uses bounded pagination and early return for efficiency +async function hasExistingBotComment(github, owner, repo, prNumber) { + console.log(`[${CONFIG.BOT_NAME}] Checking for existing bot comments...`); + + // Support both with and without [bot] suffix for GitHub Actions bot account + const botLogins = new Set([ + CONFIG.BOT_LOGIN, + `${CONFIG.BOT_LOGIN}[bot]`, + 'github-actions[bot]', + ]); + + let page = 0; + try { + for await (const response of github.paginate.iterator( + github.rest.issues.listComments, + { owner, repo, issue_number: prNumber, per_page: 100 } + )) { + // Early return if marker found + if (response.data.some(comment => + botLogins.has(comment.user?.login) && + typeof comment.body === 'string' && + comment.body.includes(CONFIG.COMMENT_MARKER) + )) { + console.log(`[${CONFIG.BOT_NAME}] Existing bot comment: true`); + return true; + } + if (++page >= CONFIG.MAX_PAGES) { + // Fail-safe: assume comment exists to prevent duplicates + console.warn( + `[${CONFIG.BOT_NAME}] Reached MAX_PAGES (${CONFIG.MAX_PAGES}) limit; assuming existing comment to avoid duplicates` + ); + return true; + } + } + } catch (error) { + console.error(`[${CONFIG.BOT_NAME}] Failed to list comments`, { + owner, + repo, + prNumber, + status: error?.status, + message: error?.message, + }); + throw error; + } + + console.log(`[${CONFIG.BOT_NAME}] Existing bot comment: false`); + return false; +} + +// Builds the verification failure comment with unverified commit details +function buildVerificationComment( + commitsUrl, + unverifiedCommits = [], + unverifiedCount = unverifiedCommits.length, + truncated = false +) { + // Build list of unverified commits (show first 10 max) + const maxDisplay = 10; + const commitList = unverifiedCommits.length + ? 
unverifiedCommits.slice(0, maxDisplay).map(c => { + const sha = c.sha?.substring(0, 7) || 'unknown'; + const msg = sanitizeMarkdown(c.commit?.message?.split('\n')[0] || 'No message').substring(0, 50); + return `- \`${sha}\` ${msg}`; + }).join('\n') + : (truncated ? '- Unable to enumerate commits due to pagination limit.' : ''); + + const moreCommits = unverifiedCommits.length > maxDisplay + ? `\n- ...and ${unverifiedCommits.length - maxDisplay} more` + : ''; + + const countText = truncated ? `at least ${unverifiedCount}` : `${unverifiedCount}`; + const truncationNote = truncated + ? '\n\n> ⚠️ Verification scanned only the first pages of commits due to pagination limits. Please review the commits tab.' + : ''; + + return `${CONFIG.COMMENT_MARKER} +Hi, this is ${CONFIG.BOT_NAME}. +Your pull request cannot be merged as it has **${countText} unverified commit(s)**: + +${commitList}${moreCommits}${truncationNote} + +View your commit verification status: [Commits Tab](${sanitizeString(commitsUrl)}). + +To achieve verified status, please read: +- [Signing guide](${CONFIG.SIGNING_GUIDE_URL}) +- [README](${CONFIG.README_URL}) +- [Discord](${CONFIG.DISCORD_URL}) + +Remember, you require a GPG key and each commit must be signed with: +\`git commit -S -s -m "Your message here"\` + +Thank you for contributing! 
+ +From the ${CONFIG.TEAM_NAME}`; +} + +// Posts verification failure comment on the PR with error handling +async function postVerificationComment( + github, + owner, + repo, + prNumber, + commitsUrl, + unverifiedCommits, + unverifiedCount, + truncated +) { + // Skip posting in dry-run mode + if (CONFIG.DRY_RUN) { + console.log(`[${CONFIG.BOT_NAME}] DRY_RUN enabled; skipping comment.`); + return true; + } + + console.log(`[${CONFIG.BOT_NAME}] Posting verification failure comment...`); + + try { + + await github.rest.issues.createComment({ + owner, + repo, + issue_number: prNumber, + body: buildVerificationComment(commitsUrl, unverifiedCommits, unverifiedCount, truncated), + }); + console.log(`[${CONFIG.BOT_NAME}] Comment posted on PR #${prNumber}`); + return true; + } catch (error) { + console.error(`[${CONFIG.BOT_NAME}] Failed to post comment`, { + owner, + repo, + prNumber, + status: error?.status, + message: error?.message, + }); + return false; + } +} + +// Main workflow handler with full validation and error handling +async function main({ github, context }) { + const owner = sanitizeString(context.repo?.owner); + const repo = sanitizeString(context.repo?.repo); + // Support PR_NUMBER env var for workflow_dispatch, fallback to context payload + const prNumber = validatePRNumber( + process.env.PR_NUMBER || context.payload?.pull_request?.number + ); + const repoPattern = /^[A-Za-z0-9_.-]+$/; + + // Validate repo context + if (!repoPattern.test(owner) || !repoPattern.test(repo)) { + console.error(`[${CONFIG.BOT_NAME}] Invalid repo context`, { owner, repo }); + return { success: false, unverifiedCount: 0 }; + } + + console.log(`[${CONFIG.BOT_NAME}] Starting verification for ${owner}/${repo} PR #${prNumber}`); + + if (!prNumber) { + console.log(`[${CONFIG.BOT_NAME}] Invalid PR number`); + return { success: false, unverifiedCount: 0 }; + } + + try { + // Get commit verification status + const { total, unverified, unverifiedCommits, truncated } = + await 
getCommitVerificationStatus(github, owner, repo, prNumber); + + // All commits verified - success + if (unverified === 0) { + console.log(`[${CONFIG.BOT_NAME}] ✅ All ${total} commits are verified`); + return { success: true, unverifiedCount: 0 }; + } + + // Some commits unverified + console.log(`[${CONFIG.BOT_NAME}] ❌ Found ${unverified} unverified commits`); + + // Check for existing comment to avoid duplicates + const existingComment = await hasExistingBotComment(github, owner, repo, prNumber); + + if (existingComment) { + console.log(`[${CONFIG.BOT_NAME}] Bot already commented. Skipping duplicate.`); + } else { + const commitsUrl = `https://github.com/${owner}/${repo}/pull/${prNumber}/commits`; + await postVerificationComment( + github, + owner, + repo, + prNumber, + commitsUrl, + unverifiedCommits, + unverified, + truncated + ); + } + + return { success: false, unverifiedCount: unverified }; + } catch (error) { + console.error(`[${CONFIG.BOT_NAME}] Verification failed`, { + owner, + repo, + prNumber, + message: error?.message, + status: error?.status, + }); + return { success: false, unverifiedCount: 0 }; + } +} + +// Exports +module.exports = main; +module.exports.getCommitVerificationStatus = getCommitVerificationStatus; +module.exports.hasExistingBotComment = hasExistingBotComment; +module.exports.postVerificationComment = postVerificationComment; +module.exports.CONFIG = CONFIG; diff --git a/.github/scripts/bot-workflows.js b/.github/scripts/bot-workflows.js new file mode 100644 index 000000000..7f82d98ae --- /dev/null +++ b/.github/scripts/bot-workflows.js @@ -0,0 +1,344 @@ +#!/usr/bin/env node + +/** + * Workflow Failure Notifier - Looks up PR and posts failure notification + * DRY_RUN controls behaviour: + * DRY_RUN = 1 -> simulate only (no changes, just logs) + * DRY_RUN = 0 -> real actions (post PR comments) + */ + +const { spawnSync } = require('child_process'); +const process = require('process'); + +// Configuration constants +const MARKER = ''; 
+const MAX_PAGES = 10; // Safety bound for comment pagination + +// Documentation links (edit these when URLs change) +const DOC_SIGNING = 'https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md'; +const DOC_CHANGELOG = 'https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/changelog_entry.md'; +const DOC_MERGE_CONFLICTS = 'https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/merge_conflicts.md'; +const DOC_REBASING = 'https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/rebasing.md'; +const DOC_TESTING = 'https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/testing.md'; +const DOC_DISCORD = 'https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md'; +const COMMUNITY_CALLS = 'https://zoom-lfx.platform.linuxfoundation.org/meetings/hiero?view=week'; + +/** + * Execute gh CLI command safely + * @param {string[]} args - Arguments array for gh command + * @param {boolean} silent - Whether to suppress output + * @returns {string} - Command output + */ +function ghCommand(args = [], silent = false) { + try { + const result = spawnSync('gh', args, { + encoding: 'utf8', + stdio: silent ? 
'pipe' : ['pipe', 'pipe', 'pipe'], + shell: false + }); + + if (result.error) { + throw result.error; + } + + if (result.status !== 0 && !silent) { + throw new Error(`Command failed with exit code ${result.status}`); + } + + return (result.stdout || '').trim(); + } catch (error) { + if (!silent) { + throw error; + } + return ''; + } +} + +/** + * Check if gh CLI exists + * @returns {boolean} + */ +function ghExists() { + try { + if (process.platform === 'win32') { + const result = spawnSync('where', ['gh'], { + encoding: 'utf8', + stdio: 'pipe', + shell: false + }); + return result.status === 0; + } else { + const result = spawnSync('which', ['gh'], { + encoding: 'utf8', + stdio: 'pipe', + shell: false + }); + return result.status === 0; + } + } catch { + return false; + } +} + +/** + * Normalise DRY_RUN input ("true"/"false" -> 1/0, case-insensitive) + * @param {string|number} value - Input value + * @returns {number} - Normalised value (0 or 1) + */ +function normaliseDryRun(value) { + const strValue = String(value).toLowerCase(); + + if (strValue === '1' || strValue === '0') { + return parseInt(strValue); + } + + if (strValue === 'true') { + return 1; + } + + if (strValue === 'false') { + return 0; + } + + console.error(`ERROR: DRY_RUN must be one of: true, false, 1, 0 (got: ${value})`); + process.exit(1); +} + +// Validate required environment variables +let FAILED_WORKFLOW_NAME = process.env.FAILED_WORKFLOW_NAME || ''; +let FAILED_RUN_ID = process.env.FAILED_RUN_ID || ''; +let GH_TOKEN = process.env.GH_TOKEN || process.env.GITHUB_TOKEN || ''; +const REPO = process.env.REPO || process.env.GITHUB_REPOSITORY || ''; +let DRY_RUN = normaliseDryRun(process.env.DRY_RUN || '1'); +let PR_NUMBER = process.env.PR_NUMBER || ''; + +// Validate workflow name contains only safe characters +if (FAILED_WORKFLOW_NAME && !/^[\w\s\-\.]+$/.test(FAILED_WORKFLOW_NAME)) { + console.error(`ERROR: FAILED_WORKFLOW_NAME contains invalid characters: ${FAILED_WORKFLOW_NAME}`); + 
process.exit(1); +} + +// Set GH_TOKEN environment variable for gh CLI +if (GH_TOKEN) { + process.env.GH_TOKEN = GH_TOKEN; +} + +// Validate required variables or set defaults in dry-run mode +if (!FAILED_WORKFLOW_NAME) { + if (DRY_RUN === 1) { + console.log('WARN: FAILED_WORKFLOW_NAME not set, using default for dry-run.'); + FAILED_WORKFLOW_NAME = 'DRY_RUN_TEST'; + } else { + console.error('ERROR: FAILED_WORKFLOW_NAME environment variable not set.'); + process.exit(1); + } +} + +if (!FAILED_RUN_ID) { + if (DRY_RUN === 1) { + console.log('WARN: FAILED_RUN_ID not set, using default for dry-run.'); + FAILED_RUN_ID = '12345'; + } else { + console.error('ERROR: FAILED_RUN_ID environment variable not set.'); + process.exit(1); + } +} + +// Validate FAILED_RUN_ID is numeric (always check when provided) +if (!/^\d+$/.test(FAILED_RUN_ID)) { + console.error(`ERROR: FAILED_RUN_ID must be a numeric integer (got: '${FAILED_RUN_ID}')`); + process.exit(1); +} + +// Validate PR_NUMBER if provided +if (PR_NUMBER && PR_NUMBER !== 'null' && !/^\d+$/.test(PR_NUMBER)) { + console.error(`ERROR: PR_NUMBER must be a numeric integer (got: '${PR_NUMBER}')`); + process.exit(1); +} + +if (!GH_TOKEN) { + if (DRY_RUN === 1) { + console.log('WARN: GH_TOKEN not set. Some dry-run operations may fail.'); + } else { + console.error('ERROR: GH_TOKEN (or GITHUB_TOKEN) environment variable not set.'); + process.exit(1); + } +} + +if (!REPO) { + console.error('ERROR: REPO environment variable not set.'); + process.exit(1); +} + +console.log('------------------------------------------------------------'); +console.log(' Workflow Failure Notifier'); +console.log(` Repo: ${REPO}`); +console.log(` Failed Workflow: ${FAILED_WORKFLOW_NAME}`); +console.log(` Failed Run ID: ${FAILED_RUN_ID}`); +console.log(` DRY_RUN: ${DRY_RUN}`); +console.log('------------------------------------------------------------'); + +// Quick gh availability/auth checks +if (!ghExists()) { + console.error('ERROR: gh CLI not found. 
Install it and ensure it\'s on PATH.'); + process.exit(1); +} + +try { + ghCommand(['auth', 'status'], true); +} catch (error) { + if (DRY_RUN === 0) { + console.error('ERROR: gh authentication required for non-dry-run mode.'); + process.exit(1); + } else { + console.log(`WARN: gh auth status failed — some dry-run operations may not work. (${error.message})`); + } +} + +// PR lookup logic - use PR_NUMBER from workflow_run payload if available, otherwise fallback to branch-based approach +console.log('Looking up PR for failed workflow run...'); + +// Use PR_NUMBER from workflow_run payload if provided (optimized path) +if (PR_NUMBER && PR_NUMBER !== 'null') { + console.log(`Using PR number from workflow_run payload: ${PR_NUMBER}`); +} else { + console.log('PR_NUMBER not provided, falling back to branch-based lookup...'); + + let HEAD_BRANCH = ''; + try { + HEAD_BRANCH = ghCommand(['run', 'view', FAILED_RUN_ID, '--repo', REPO, '--json', 'headBranch', '--jq', '.headBranch'], true); + } catch { + HEAD_BRANCH = ''; + } + + if (!HEAD_BRANCH) { + if (DRY_RUN === 1) { + console.log('WARN: Could not retrieve head branch in dry-run mode (run ID may be invalid). 
Exiting gracefully.'); + process.exit(0); + } else { + console.error(`ERROR: Could not retrieve head branch from workflow run ${FAILED_RUN_ID}`); + process.exit(1); + } + } + + console.log(`Found head branch: ${HEAD_BRANCH}`); + + // Validate branch name format + if (HEAD_BRANCH.startsWith('-') || !/^[\w.\/-]+$/.test(HEAD_BRANCH)) { + console.error(`ERROR: HEAD_BRANCH contains invalid characters: ${HEAD_BRANCH}`); + process.exit(1); + } + + // Find PR number for this branch (only open PRs) + try { + PR_NUMBER = ghCommand(['pr', 'list', '--repo', REPO, '--head', HEAD_BRANCH, '--json', 'number', '--jq', '.[0].number'], true); + } catch { + PR_NUMBER = ''; + } + + if (!PR_NUMBER) { + if (DRY_RUN === 1) { + console.log(`No PR associated with workflow run ${FAILED_RUN_ID}, but DRY_RUN=1 - exiting successfully.`); + process.exit(0); + } else { + console.log(`INFO: No open PR found for branch '${HEAD_BRANCH}' (workflow run ${FAILED_RUN_ID}). Nothing to notify.`); + process.exit(0); + } + } +} + +console.log(`Found PR #${PR_NUMBER}`); + +// Build notification message with failure details and documentation links +const COMMENT = `${MARKER} +Hi, this is WorkflowBot. +Your pull request cannot be merged as it is not passing all our workflow checks. +Please click on each check to review the logs and resolve issues so all checks pass. +To help you: +- [DCO signing guide](${DOC_SIGNING}) +- [Changelog guide](${DOC_CHANGELOG}) +- [Merge conflicts guide](${DOC_MERGE_CONFLICTS}) +- [Rebase guide](${DOC_REBASING}) +- [Testing guide](${DOC_TESTING}) +- [Discord](${DOC_DISCORD}) +- [Community Calls](${COMMUNITY_CALLS}) +Thank you for contributing! 
+From the Hiero Python SDK Team`; + +// Check for duplicate comments using the correct endpoint for issue comments +let PAGE = 1; +let DUPLICATE_EXISTS = false; + +while (PAGE <= MAX_PAGES) { + let COMMENTS_PAGE = ''; + try { + COMMENTS_PAGE = ghCommand(['api', '--header', 'Accept: application/vnd.github.v3+json', `/repos/${REPO}/issues/${PR_NUMBER}/comments?per_page=100&page=${PAGE}`], true); + } catch (error) { + console.log(`WARN: Failed to fetch comments page ${PAGE}: ${error.message}`); + COMMENTS_PAGE = '[]'; + } + + // Parse JSON + let comments = []; + try { + comments = JSON.parse(COMMENTS_PAGE); + } catch (error) { + console.log(`WARN: Failed to parse comments JSON on page ${PAGE}: ${error.message}`); + comments = []; + } + + // Check if the page is empty (no more comments) + if (comments.length === 0) { + break; + } + + // Check this page for the marker + const foundDuplicate = comments.some(comment => { + return comment.body && comment.body.includes(MARKER); + }); + + if (foundDuplicate) { + DUPLICATE_EXISTS = true; + console.log('Found existing duplicate comment. 
Skipping.'); + break; + } + + PAGE++; +} + +if (!DUPLICATE_EXISTS) { + console.log('No existing duplicate comment found.'); +} + +// Dry-run mode or actual posting +if (DRY_RUN === 1) { + console.log(`[DRY RUN] Would post comment to PR #${PR_NUMBER}:`); + console.log('----------------------------------------'); + console.log(COMMENT); + console.log('----------------------------------------'); + if (DUPLICATE_EXISTS) { + console.log('[DRY RUN] Would skip posting due to duplicate comment'); + } else { + console.log('[DRY RUN] Would post new comment (no duplicates found)'); + } +} else { + if (DUPLICATE_EXISTS) { + console.log('Comment already exists, skipping.'); + } else { + console.log(`Posting new comment to PR #${PR_NUMBER}...`); + + try { + ghCommand(['pr', 'comment', PR_NUMBER, '--repo', REPO, '--body', COMMENT]); + console.log(`Successfully posted comment to PR #${PR_NUMBER}`); + } catch (error) { + console.error(`ERROR: Failed to post comment to PR #${PR_NUMBER}`); + console.error(error.message); + process.exit(1); + } + } +} + +console.log('------------------------------------------------------------'); +console.log(' Workflow Failure Notifier Complete'); +console.log(` DRY_RUN: ${DRY_RUN}`); +console.log('------------------------------------------------------------'); diff --git a/.github/scripts/bot-working-on-comment.js b/.github/scripts/bot-working-on-comment.js new file mode 100644 index 000000000..6341c7c78 --- /dev/null +++ b/.github/scripts/bot-working-on-comment.js @@ -0,0 +1,88 @@ +/* +------------------------------------------------------------------------------ +Working On It Bot + +Executes When: + - Triggered by GitHub Actions on 'issue_comment' (created) + +Goal: + Allows an assignee or PR author to signal they are active by commenting "/working". + This acts as a signal to other inactivity bots to skip their next cleanup cycle. 
+ +Safeguards: + - Works on both Issues and PRs + - Only the Assignee (for issues) or Author (for PRs) can trigger it + - Reacts with an emoji to confirm receipt +------------------------------------------------------------------------------ +*/ + +function isValidContext(comment) { + if (!comment?.body || !comment?.user?.login) return false; + if (comment.user.type === "Bot") return false; + return true; +} + +function commentRequestsWorking(body) { + return typeof body === "string" && /(^|\s)\/working(\s|$)/i.test(body); +} + +function isAuthorizedUser(issue, username) { + // If it's a PR, the author can trigger it + if (issue.pull_request && issue.user?.login === username) { + return true; + } + // If it's an issue (or PR), any assignee can trigger it + return issue.assignees?.some((a) => a.login === username); +} + +module.exports = async ({ github, context }) => { + const dryRun = (process.env.DRY_RUN || "false").toLowerCase() === "true"; + try { + const { issue, comment } = context.payload; + const { owner, repo } = context.repo; + + // 1. Basic Validation + if (!issue) { + console.log("[working] No issue in payload. Ignoring."); + return; + } + + if (!isValidContext(comment)) return; + + // 2. Check for Command + if (!commentRequestsWorking(comment.body)) return; + + const username = comment.user.login; + console.log( + `[working] Command detected by ${username} on #${issue.number}`, + ); + + // 3. Authorization Check + if (!isAuthorizedUser(issue, username)) { + console.log( + `[working] User ${username} is not assigned or the author. Ignoring.`, + ); + return; + } + + // 4. Acknowledge with a reaction + // We don't need to save state here; the other bots will query the comment timestamp directly. 
+ if (dryRun) { + console.log(`[working] DRY-RUN: Would have reacted to comment ${comment.id} with 'eyes'.`); + return; + } + await github.rest.reactions.createForIssueComment({ + owner, + repo, + comment_id: comment.id, + content: "eyes", + }); + + console.log( + `[working] Acknowledged command from ${username} with reaction.`, + ); + } catch (error) { + console.error("[working] Error:", error.message); + // Don't throw, just log, so we don't fail the workflow pipeline for minor API hiccups + } +}; diff --git a/.github/scripts/linked_issue_enforce.js b/.github/scripts/linked_issue_enforce.js index 98d4cd55c..443dca627 100644 --- a/.github/scripts/linked_issue_enforce.js +++ b/.github/scripts/linked_issue_enforce.js @@ -8,6 +8,9 @@ const requireAuthorAssigned = (process.env.REQUIRE_AUTHOR_ASSIGNED || 'true').to const getDaysOpen = (pr) => Math.floor((Date.now() - new Date(pr.created_at)) / (24 * 60 * 60 * 1000)); +// Check if the PR author is a bot +const isBotAuthor = (pr) => pr.user?.type === 'Bot'; + // Check if the PR author is assigned to the issue const isAuthorAssigned = (issue, login) => { if (!issue || issue.state?.toUpperCase() !== 'OPEN') return false; @@ -104,6 +107,17 @@ module.exports = async ({ github, context }) => { console.log(`Evaluating ${prs.length} open PRs\n`); for (const pr of prs) { + + const authorLogin = pr.user?.login; + if (!authorLogin) { + console.warn(`PR #${pr.number} missing author login. Skipping for safety.`); + continue; + } + if (isBotAuthor(pr)) { + console.log(`PR #${pr.number} authored by bot (${authorLogin}). Skipping.`); + continue; + } + const days = getDaysOpen(pr); if (days < daysBeforeClose) { console.log(`PR #${pr.number} link: ${pr.html_url} is only ${days} days old. 
Skipping.`); diff --git a/.github/scripts/pr_inactivity_reminder.js b/.github/scripts/pr_inactivity_reminder.js index 27411e2f0..a98a34464 100644 --- a/.github/scripts/pr_inactivity_reminder.js +++ b/.github/scripts/pr_inactivity_reminder.js @@ -1,7 +1,8 @@ // A script to remind PR authors of inactivity by posting a comment. // DRY_RUN env var: any case-insensitive 'true' value will enable dry-run -const dryRun = (process.env.DRY_RUN || 'false').toString().toLowerCase() === 'true'; +const dryRun = + (process.env.DRY_RUN || "false").toString().toLowerCase() === "true"; // Helper to resolve the head repo of a PR function resolveHeadRepo(pr, defaultOwner, defaultRepo) { @@ -13,7 +14,11 @@ function resolveHeadRepo(pr, defaultOwner, defaultRepo) { // Helper to get the last commit date of a PR async function getLastCommitDate(github, pr, owner, repo) { - const { owner: headRepoOwner, repo: headRepoName } = resolveHeadRepo(pr, owner, repo); + const { owner: headRepoOwner, repo: headRepoName } = resolveHeadRepo( + pr, + owner, + repo, + ); try { const commitRes = await github.rest.repos.getCommit({ owner: headRepoOwner, @@ -21,14 +26,18 @@ async function getLastCommitDate(github, pr, owner, repo) { ref: pr.head.sha, }); const commit = commitRes.data?.commit ?? null; - return new Date(commit?.author?.date || commit?.committer?.date || pr.created_at); + return new Date( + commit?.author?.date || commit?.committer?.date || pr.created_at, + ); } catch (getCommitErr) { - console.log(`Failed to fetch head commit ${pr.head.sha} for PR #${pr.number}:`, getCommitErr.message || getCommitErr); + console.log( + `Failed to fetch head commit ${pr.head.sha} for PR #${pr.number}:`, + getCommitErr.message || getCommitErr, + ); return null; // Signal fallback needed } } - // Look for an existing bot comment using our unique marker. 
async function hasExistingBotComment(github, pr, owner, repo, marker) { try { @@ -38,21 +47,49 @@ async function hasExistingBotComment(github, pr, owner, repo, marker) { issue_number: pr.number, per_page: 100, }); - return comments.find(c => c.body && c.body.includes(marker)) || false; + return comments.find((c) => c.body && c.body.includes(marker)) || false; } catch (err) { - console.log(`Failed to list comments for PR #${pr.number}:`, err.message || err); + console.log( + `Failed to list comments for PR #${pr.number}:`, + err.message || err, + ); return null; // Prevent duplicate comment if we cannot check } } // Helper to post an inactivity comment -async function postInactivityComment(github, pr, owner, repo, marker, inactivityDays, discordLink, office_hours_calendar) { +async function postInactivityComment( + github, + pr, + owner, + repo, + marker, + inactivityDays, + discordLink, + office_hours_calendar, +) { const comment = `${marker} -Hi @${pr.user.login},\n\nThis pull request has had no commit activity for ${inactivityDays} days. Are you still working on the issue? please push a commit to keep the PR active or it will be closed due to inactivity. -If you’re no longer able to work on this issue, please comment `/unassign` on the linked **issue** (not this pull request) to release it. -Reach out on discord or join our office hours if you need assistance.\n\n- ${discordLink}\n- ${office_hours_calendar} \n\nFrom the Python SDK Team`; +Hi @${pr.user.login}, + +This pull request has had no commit activity for ${inactivityDays} days. Are you still working on it? +To keep the PR active, you can: + +- Push a new commit. +- Comment \`/working\` on the linked **issue** (not this PR). + +If you're no longer working on this, please comment \`/unassign\` on the linked issue to release it for others. Otherwise, this PR may be closed due to inactivity. + +Reach out on discord or join our office hours if you need assistance. 
+ +- ${discordLink} +- ${office_hours_calendar} + +From the Python SDK Team`; + if (dryRun) { - console.log(`DRY-RUN: Would comment on PR #${pr.number} (${pr.html_url}) with body:\n---\n${comment}\n---`); + console.log( + `DRY-RUN: Would comment on PR #${pr.number} (${pr.html_url}) with body:\n---\n${comment}\n---`, + ); return true; } @@ -72,18 +109,20 @@ Reach out on discord or join our office hours if you need assistance.\n\n- ${dis } // Main module function -module.exports = async ({github, context}) => { +module.exports = async ({ github, context }) => { const inactivityThresholdDays = 10; // days of inactivity before commenting - const cutoff = new Date(Date.now() - inactivityThresholdDays * 24 * 60 * 60 * 1000); + const cutoff = new Date( + Date.now() - inactivityThresholdDays * 24 * 60 * 60 * 1000, + ); const owner = context.repo.owner; const repo = context.repo.repo; const discordLink = `[Discord](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md)`; - const office_hours_calendar =`[Office Hours](https://zoom-lfx.platform.linuxfoundation.org/meetings/hiero?view=week)`; + const office_hours_calendar = `[Office Hours](https://zoom-lfx.platform.linuxfoundation.org/meetings/hiero?view=week)`; // Unique marker so we can find the bot's own comment later. - const marker = ''; + const marker = ""; if (dryRun) { - console.log('Running in DRY-RUN mode: no comments will be posted.'); + console.log("Running in DRY-RUN mode: no comments will be posted."); } let commentedCount = 0; @@ -92,33 +131,68 @@ module.exports = async ({github, context}) => { const prs = await github.paginate(github.rest.pulls.list, { owner, repo, - state: 'open', + state: "open", per_page: 100, }); for (const pr of prs) { // 1. Check inactivity const lastCommitDate = await getLastCommitDate(github, pr, owner, repo); - const inactivityDays = Math.floor((Date.now() - (lastCommitDate ? 
lastCommitDate.getTime() : new Date(pr.created_at).getTime())) / (1000 * 60 * 60 * 24)); + const inactivityDays = Math.floor( + (Date.now() - + (lastCommitDate + ? lastCommitDate.getTime() + : new Date(pr.created_at).getTime())) / + (1000 * 60 * 60 * 24), + ); if (lastCommitDate > cutoff) { skippedCount++; - console.log(`PR #${pr.number} has recent commit on ${lastCommitDate.toISOString()} - skipping`); + + console.log( + `PR #${pr.number} has recent commit on ${lastCommitDate.toISOString()} - skipping`, + ); + continue; } // 2. Check for existing comment - const existingBotComment = await hasExistingBotComment(github, pr, owner, repo, marker); + const existingBotComment = await hasExistingBotComment( + github, + pr, + owner, + repo, + marker, + ); + if (existingBotComment) { skippedCount++; - const idInfo = existingBotComment && existingBotComment.id ? existingBotComment.id : '(unknown)'; - console.log(`PR #${pr.number} already has an inactivity comment (id: ${idInfo}) - skipping`); + + const idInfo = + existingBotComment && existingBotComment.id + ? existingBotComment.id + : "(unknown)"; + + console.log( + `PR #${pr.number} already has an inactivity comment (id: ${idInfo}) - skipping`, + ); + continue; } // 3. 
Post inactivity comment - const commented = await postInactivityComment(github, pr, owner, repo, marker, inactivityDays, discordLink, office_hours_calendar); + const commented = await postInactivityComment( + github, + pr, + owner, + repo, + marker, + inactivityDays, + discordLink, + office_hours_calendar, + ); + if (commented) commentedCount++; } diff --git a/.github/workflows/bot-advanced-check.yml b/.github/workflows/bot-advanced-check.yml index 517ae8ab3..f998b71c9 100644 --- a/.github/workflows/bot-advanced-check.yml +++ b/.github/workflows/bot-advanced-check.yml @@ -1,9 +1,7 @@ name: PythonBot - Advanced Requirement Check - on: issues: types: [assigned, labeled] - workflow_dispatch: inputs: dry_run: @@ -15,56 +13,54 @@ on: description: "GitHub username to dry-run qualification check" required: true type: string - permissions: - contents: read # required for actions/checkout + contents: read issues: write - concurrency: group: ${{ github.workflow }}-${{ github.event.issue.number || github.run_id }} cancel-in-progress: true - jobs: ####################################### # Automatic enforcement + manual dry-run ####################################### check-advanced-qualification: - if: > - ( - github.event_name == 'issues' && - contains(github.event.issue.labels.*.name, 'advanced') && - ( - github.event.action == 'assigned' || - ( - github.event.action == 'labeled' && - github.event.label.name == 'advanced' && - github.event.issue.assignees[0] != null - ) - ) - ) || - github.event_name == 'workflow_dispatch' - + # steps are skipped, job completes successfully runs-on: ubuntu-latest - steps: + - name: Filter + id: should_run + run: echo "value=true" >> $GITHUB_OUTPUT + if: | + github.event_name == 'workflow_dispatch' || + ( + github.event_name == 'issues' && + contains(github.event.issue.labels.*.name, 'advanced') && + ( + github.event.action == 'assigned' || + (github.event.action == 'labeled' && github.event.label.name == 'advanced' && 
join(github.event.issue.assignees.*.login, ',') != '') + ) + ) + - name: Log skip reason + if: steps.should_run.outputs.value != 'true' + run: | + echo "::notice::Skipping: event=${{ github.event_name }}, action=${{ github.event.action }}, has_advanced_label=${{ contains(github.event.issue.labels.*.name, 'advanced') }}" - name: Checkout scripts + if: steps.should_run.outputs.value == 'true' uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 with: - sparse-checkout: .github/scripts + sparse-checkout: | + .github/scripts + sparse-checkout-cone-mode: false - name: Verify User Qualification + if: steps.should_run.outputs.value == 'true' env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} REPO: ${{ github.repository }} - - # Issue-driven execution TRIGGER_ASSIGNEE: ${{ github.event.assignee.login || '' }} ISSUE_NUMBER: ${{ github.event.issue.number || '' }} - - # Manual execution (workflow_dispatch) DRY_RUN: ${{ github.event_name == 'workflow_dispatch' && inputs.dry_run || 'false' }} DRY_RUN_USER: ${{ github.event_name == 'workflow_dispatch' && inputs.username || '' }} - run: | chmod +x .github/scripts/bot-advanced-check.sh - ./.github/scripts/bot-advanced-check.sh \ No newline at end of file + .github/scripts/bot-advanced-check.sh \ No newline at end of file diff --git a/.github/workflows/bot-assignment-check.yml b/.github/workflows/bot-assignment-check.yml index a4f580d39..06ee15d21 100644 --- a/.github/workflows/bot-assignment-check.yml +++ b/.github/workflows/bot-assignment-check.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit diff --git a/.github/workflows/bot-beginner-assign-on-comment.yml b/.github/workflows/bot-beginner-assign-on-comment.yml index 5eb751834..ee47b46cc 100644 --- 
a/.github/workflows/bot-beginner-assign-on-comment.yml +++ b/.github/workflows/bot-beginner-assign-on-comment.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Harden runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit diff --git a/.github/workflows/bot-coderabbit-plan-trigger.yml b/.github/workflows/bot-coderabbit-plan-trigger.yml index 18ada25df..c832e772f 100644 --- a/.github/workflows/bot-coderabbit-plan-trigger.yml +++ b/.github/workflows/bot-coderabbit-plan-trigger.yml @@ -34,7 +34,7 @@ jobs: steps: - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit diff --git a/.github/workflows/bot-community-calls.yml b/.github/workflows/bot-community-calls.yml index 7f01a418a..fd6f40f5a 100644 --- a/.github/workflows/bot-community-calls.yml +++ b/.github/workflows/bot-community-calls.yml @@ -27,7 +27,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 #2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 #2.14.1 with: egress-policy: audit @@ -37,6 +37,6 @@ jobs: - name: Check Schedule and Notify env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - DRY_RUN: "false" # will post when workflow is triggered by cron + DRY_RUN: ${{ github.event_name == 'workflow_dispatch' && inputs.dry_run || 'false' }} run: | bash .github/scripts/bot-community-calls.sh diff --git a/.github/workflows/bot-gfi-assign-on-comment.yml b/.github/workflows/bot-gfi-assign-on-comment.yml index 3bd35dbb6..002b259dc 100644 --- a/.github/workflows/bot-gfi-assign-on-comment.yml +++ b/.github/workflows/bot-gfi-assign-on-comment.yml @@ -23,7 +23,7 @@ jobs: steps: - name: Harden 
runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit diff --git a/.github/workflows/bot-gfi-candidate-notification.yaml b/.github/workflows/bot-gfi-candidate-notification.yaml index 072732d6e..bc369c86d 100644 --- a/.github/workflows/bot-gfi-candidate-notification.yaml +++ b/.github/workflows/bot-gfi-candidate-notification.yaml @@ -21,7 +21,7 @@ jobs: steps: - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 with: egress-policy: audit diff --git a/.github/workflows/bot-inactivity-unassign.yml b/.github/workflows/bot-inactivity-unassign.yml index 4592eaa8b..c30967dc2 100644 --- a/.github/workflows/bot-inactivity-unassign.yml +++ b/.github/workflows/bot-inactivity-unassign.yml @@ -25,7 +25,7 @@ jobs: uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 - name: Harden the runner - uses: step-security/harden-runner@df199fb7be9f65074067a9eb93f12bb4c5547cf2 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 with: egress-policy: audit diff --git a/.github/workflows/bot-intermediate-assignment.yml b/.github/workflows/bot-intermediate-assignment.yml index 8d26ac9a8..5c0c66392 100644 --- a/.github/workflows/bot-intermediate-assignment.yml +++ b/.github/workflows/bot-intermediate-assignment.yml @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit diff --git a/.github/workflows/bot-issue-reminder-no-pr.yml b/.github/workflows/bot-issue-reminder-no-pr.yml index 2ce4ccb49..3cce4d2a0 100644 --- 
a/.github/workflows/bot-issue-reminder-no-pr.yml +++ b/.github/workflows/bot-issue-reminder-no-pr.yml @@ -22,7 +22,7 @@ jobs: steps: - name: Harden the runner - uses: step-security/harden-runner@df199fb7be9f65074067a9eb93f12bb4c5547cf2 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 with: egress-policy: audit diff --git a/.github/workflows/bot-linked-issue-enforcer.yml b/.github/workflows/bot-linked-issue-enforcer.yml index db9efffcb..d94f2fa93 100644 --- a/.github/workflows/bot-linked-issue-enforcer.yml +++ b/.github/workflows/bot-linked-issue-enforcer.yml @@ -24,7 +24,7 @@ jobs: steps: - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit - name: Checkout repository diff --git a/.github/workflows/bot-merge-conflict.yml b/.github/workflows/bot-merge-conflict.yml index 2ff5eea0b..ab1bfd810 100644 --- a/.github/workflows/bot-merge-conflict.yml +++ b/.github/workflows/bot-merge-conflict.yml @@ -33,7 +33,7 @@ jobs: ref: ${{ github.event.repository.default_branch }} - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit diff --git a/.github/workflows/bot-next-issue-recommendation.yml b/.github/workflows/bot-next-issue-recommendation.yml new file mode 100644 index 000000000..8ccf6e0ae --- /dev/null +++ b/.github/workflows/bot-next-issue-recommendation.yml @@ -0,0 +1,36 @@ +name: Next Issue Recommendation Bot + +on: + pull_request_target: + types: [closed] + +permissions: + pull-requests: write + issues: read + contents: read + +concurrency: + group: next-issue-bot-${{ github.event.pull_request.number || github.run_id }} + cancel-in-progress: false + +jobs: + recommend-next-issue: + runs-on: 
ubuntu-latest + if: github.event.pull_request.merged == true + + steps: + - name: Harden the runner (Audit all outbound calls) + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 + with: + egress-policy: audit + + - name: Checkout repository + uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1 + + - name: Recommend next issue + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # 8.0.0 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + const script = require('./.github/scripts/bot-next-issue-recommendation.js'); + await script({ github, context, core }); diff --git a/.github/workflows/bot-office-hours.yml b/.github/workflows/bot-office-hours.yml index 223e2ce01..e2147426e 100644 --- a/.github/workflows/bot-office-hours.yml +++ b/.github/workflows/bot-office-hours.yml @@ -26,7 +26,7 @@ jobs: cancel-in-progress: false steps: - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 #2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 #2.14.1 with: egress-policy: audit diff --git a/.github/workflows/bot-p0-issues-notify-team.yml b/.github/workflows/bot-p0-issues-notify-team.yml index 476b61e1b..d4fba998b 100644 --- a/.github/workflows/bot-p0-issues-notify-team.yml +++ b/.github/workflows/bot-p0-issues-notify-team.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit diff --git a/.github/workflows/bot-pr-inactivity-reminder.yml b/.github/workflows/bot-pr-inactivity-reminder.yml index 7de72d36f..be07db03a 100644 --- a/.github/workflows/bot-pr-inactivity-reminder.yml +++ b/.github/workflows/bot-pr-inactivity-reminder.yml @@ -26,7 +26,7 @@ jobs: steps: - name: Harden the runner - uses: 
step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit - name: Checkout repository diff --git a/.github/workflows/bot-pr-missing-linked-issue.yml b/.github/workflows/bot-pr-missing-linked-issue.yml index fd39c4431..b60788907 100644 --- a/.github/workflows/bot-pr-missing-linked-issue.yml +++ b/.github/workflows/bot-pr-missing-linked-issue.yml @@ -33,7 +33,7 @@ jobs: steps: - name: Harden the runner - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit diff --git a/.github/workflows/bot-verified-commits.yml b/.github/workflows/bot-verified-commits.yml index 5e032d27c..5d775127c 100644 --- a/.github/workflows/bot-verified-commits.yml +++ b/.github/workflows/bot-verified-commits.yml @@ -1,8 +1,27 @@ +# .github/workflows/bot-verified-commits.yml +# +# Verifies that all commits in a pull request are GPG-signed. +# Posts a one-time VerificationBot comment if unverified commits are found. +# +# This workflow uses pull_request_target for security with fork PRs. +# Logic is handled by .github/scripts/bot-verified-commits.js +# +# Configuration is done via environment variables for easy customization. 
+ name: PythonBot - Verify PR Commits on: pull_request_target: types: [opened, synchronize] + workflow_dispatch: + inputs: + pr_number: + description: "PR number to verify (required for manual runs)" + required: true + dry_run: + description: "Run without posting comments" + required: false + default: "true" permissions: contents: read @@ -10,67 +29,81 @@ permissions: issues: write concurrency: - group: "verify-commits-${{ github.event.pull_request.number }}" + group: "verify-commits-${{ github.event_name == 'workflow_dispatch' && inputs.pr_number || github.event.pull_request.number }}" cancel-in-progress: true jobs: verify-commits: runs-on: ubuntu-latest + # ========================================================================= + # CONFIGURATION - All customizable values are defined here as env vars + # ========================================================================= + env: + # Bot identity + BOT_NAME: "VerificationBot" + BOT_LOGIN: "github-actions" + + # Comment marker for duplicate detection + COMMENT_MARKER: "[commit-verification-bot]" + + # Documentation links + SIGNING_GUIDE_URL: "https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/sdk_developers/signing.md" + README_URL: "https://github.com/hiero-ledger/hiero-sdk-python/blob/main/README.md" + DISCORD_URL: "https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md" + + # Team signature + TEAM_NAME: "Hiero Python SDK Team" + + # Dry-run mode (workflow_dispatch uses input, PR events default to false) + DRY_RUN: ${{ github.event_name == 'workflow_dispatch' && inputs.dry_run || 'false' }} + + # PR number (supports both PR events and manual workflow_dispatch) + PR_NUMBER: ${{ github.event_name == 'workflow_dispatch' && inputs.pr_number || github.event.pull_request.number }} + steps: - name: Harden the runner (Audit all outbound calls) - uses: step-security/harden-runner@20cf305ff2072d973412fa9b1e3a4f227bda3c76 # v2.14.0 + uses: 
step-security/harden-runner@e3f713f2d8f53843e71c69a996d56f51aa9adfb9 # v2.14.1 with: egress-policy: audit - - - name: Check for unverified commits + + - name: Checkout repository + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4.3.1 + with: + sparse-checkout: .github/scripts + persist-credentials: false + + - name: Log workflow context + run: | + echo "Repository: ${{ github.repository }}" + echo "PR: ${{ env.PR_NUMBER }}" + echo "Actor: ${{ github.actor }}" + echo "Event: ${{ github.event_name }}" + echo "Dry run mode: ${{ env.DRY_RUN }}" + + - name: Verify PR commits + uses: actions/github-script@ed597411d8f924073f98dfc5c65a23a2325f34cd # v8.0.0 + id: verify env: - GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ env.PR_NUMBER }} + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + result-encoding: json + script: | + const script = require('./.github/scripts/bot-verified-commits.js'); + const result = await script({ github, context }); + + // Set outputs for downstream steps if needed + core.setOutput('success', result.success); + core.setOutput('unverified_count', result.unverifiedCount); + + return result; + + - name: Fail if unverified commits found + if: steps.verify.outputs.success != 'true' && env.DRY_RUN != 'true' run: | - PR_NUMBER=${{ github.event.pull_request.number }} - REPO="${{ github.repository }}" - COMMITS_URL="https://github.com/$REPO/pull/${PR_NUMBER}/commits" - - echo "Checking commits in PR #$PR_NUMBER for repository $REPO..." 
- - COMMITS_JSON=$(gh api repos/$REPO/pulls/$PR_NUMBER/commits) - UNVERIFIED_COUNT=$(echo "$COMMITS_JSON" | jq '[.[] | select(.commit.verification.verified == false)] | length') - - echo "Unverified commits: $UNVERIFIED_COUNT" - - EXISTING_BOT_COMMENT_COUNT=$(gh pr view $PR_NUMBER --repo $REPO --json comments | jq '[.comments[] | select(.author.login == "github-actions" and (.body | contains("[commit-verification-bot]")))] | length') - - echo "Existing verification commit bot comments: $EXISTING_BOT_COMMENT_COUNT" - - if [ "$UNVERIFIED_COUNT" -gt 0 ]; then - if [ "$EXISTING_BOT_COMMENT_COUNT" -ge 1 ]; then - echo "VerificationBot already commented. Skipping additional comments." - else - COMMENT=$(cat <> $GITHUB_ENV - - - name: Comment on PR - if: env.PR_NUMBER != '' + - name: Notify PR of workflow failure + if: github.event_name != 'workflow_dispatch' || github.event.inputs.failed_run_id != '' env: + FAILED_WORKFLOW_NAME: ${{ github.event.workflow_run.name || 'Manual Test Run' }} + FAILED_RUN_ID: ${{ github.event.inputs.failed_run_id || github.event.workflow_run.id }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - REPO="${{ github.repository }}" - COMMENT=$(cat < contract_types_pb2.ContractLoginfo ### Removed - N/A + +# [0.1.0] - 2025-02-19 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 0b57256d2..4d2c70607 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -22,8 +22,9 @@ Thank you for your interest in contributing to the Hiero Python SDK! **Get Started By Reading:** -- [Project Structure](docs/sdk_developers/project_structure.md) +- [Project Structure](docs/sdk_developers/training/setup/project_structure.md) - [Setup](docs/sdk_developers/training/setup) +- [Setup (Windows)](docs/sdk_developers/training/setup/setup_windows.md) - [Workflow](docs/sdk_developers/training/workflow) **Quick Start:** @@ -161,13 +162,6 @@ git rebase main -S **Full guide:** [Rebasing Guide](docs/sdk_developers/rebasing.md) ---- - -## Common Issues - -**HELP! 
I have an issue...** -No worries, we're here to help. But please first see the [Common Issues Guide](docs/common_issues.md). - --- diff --git a/MAINTAINERS.md b/MAINTAINERS.md index c81f6a0f0..08948deb3 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -1,6 +1,6 @@ # Maintainers -The general handling of Maintainer rights and all groups in this GitHub org is done in the https://github.com/hiero-ledger/governance repository. +The general handling of Maintainer rights and all groups in this GitHub org is done in the [hiero-ledger/.github](https://github.com/hiero-ledger/.github) repository. ## Maintainer Scopes, GitHub Roles and GitHub Teams @@ -18,11 +18,11 @@ Maintainers are assigned the following scopes in this repository: | Name | GitHub ID | Scope | LFID | Discord ID | Email | Company Affiliation | | -------------- | ------------- | ----- | ---- | -------------- | ----- | ------------------- | -| Nadine Loepfe | nadineloepfe | | | nadine_90669 | | Hashgraph | | Sophie Bulloch | exploreriii | | | explorer3 | | | +| Manish Dait | manishdait | | | manish24 | | | +| Nadine Loepfe | nadineloepfe | | | nadine_90669 | | Hashgraph | | Richard Bair | rbair23 | | | rbair | | Hashgraph | - ## Emeritus Maintainers | Name | GitHub ID | Scope | LFID | Discord ID | Email | Company Affiliation | @@ -31,4 +31,4 @@ Maintainers are assigned the following scopes in this repository: ## The Duties of a Maintainer -Maintainers are expected to perform duties in alignment with **[Hiero-Ledger's defined maintainer guidelines](https://github.com/hiero-ledger/governance/blob/main/roles-and-groups.md#maintainers).** +Maintainers are expected to perform duties in alignment with **[Hiero-Ledger's defined maintainer guidelines](https://github.com/hiero-ledger/.github/blob/main/CONTRIBUTING.md#about-users-and-maintainers).** diff --git a/README.md b/README.md index a80f5a60c..f8ccb309d 100644 --- a/README.md +++ b/README.md @@ -63,6 +63,7 @@ print(f"Balance: {balance.hbars} HBAR") - **[Contributing Guide](CONTRIBUTING.md)** - Start here!
- **[Setup Guide](docs/sdk_developers/setup.md)** - First-time environment setup +- **[Windows Setup Guide](docs/sdk_developers/training/setup/setup_windows.md)** - Step-by-step guide for Windows users - **[Workflow Guide](docs/sdk_developers/workflow.md)** - Day-to-day development workflow - **[Signing Guide](docs/sdk_developers/signing.md)** - GPG and DCO commit signing (required) - **[Changelog Guide](docs/sdk_developers/changelog_entry.md)** - How to write changelog entries diff --git a/docs/maintainers/team.md b/docs/maintainers/team.md index 426498d9b..d8f52cd86 100644 --- a/docs/maintainers/team.md +++ b/docs/maintainers/team.md @@ -22,6 +22,12 @@ GitHub IDs and names are provided as listed on GitHub. | Priyanshu | @tech0priyanshu | | Akshat | @Akshat8510 | | Nadine Loepfe | @nadineloepfe | +| Mounil2005 | @Mounil2005 | +| parvninama | @parvninama | +| drtoxic69 | @drtoxic69 | +| prajeeta15 | @prajeeta15 | +| undefinedIsMyLife | @undefinedIsMyLife | +| cheese-cakee | @cheese-cakee | [View Triage Team on GitHub](https://github.com/orgs/hiero-ledger/teams/hiero-sdk-python-triage) @@ -35,6 +41,10 @@ GitHub IDs and names are provided as listed on GitHub. | Manish Dait | @manishdait | | Dosi Kolev | @Dosik13 | | Nadine Loepfe | @nadineloepfe | +| Adityarya11 | @Adityarya11 | +| MonaaEid | @MonaaEid | +| aceppaluni | @aceppaluni | +| AntonioCeppellini | @AntonioCeppellini | [View Committer Team on GitHub](https://github.com/orgs/hiero-ledger/teams/hiero-sdk-python-committers) @@ -47,6 +57,7 @@ GitHub IDs and names are provided as listed on GitHub.
| Richard Bair | @rbair23 | | Sophie Bulloch | @exploreriii | | Nadine Loepfe| @nadineloepfe | +| Manish Dait | @manishdait | [View Maintainer Team on GitHub](https://github.com/orgs/hiero-ledger/teams/hiero-sdk-python-maintainers) diff --git a/docs/sdk_developers/automations/next-issue-recommendation-bot.md b/docs/sdk_developers/automations/next-issue-recommendation-bot.md new file mode 100644 index 000000000..1547645d9 --- /dev/null +++ b/docs/sdk_developers/automations/next-issue-recommendation-bot.md @@ -0,0 +1,156 @@ +# Next Issue Recommendation Bot + +## Overview + +The Next Issue Recommendation Bot is an automated GitHub Actions workflow designed to improve contributor retention by recommending relevant issues to contributors after their first successful pull request merge. This bot specifically targets contributors who complete "Good First Issue" or "beginner" level issues, helping them find their next contribution opportunity. + +## Trigger Conditions + +The bot triggers under the following conditions: + +1. **Automatic Trigger**: When a pull request is merged (`pull_request_target` event with `closed` type) + +The workflow only runs when: +- The pull request has been merged (`github.event.pull_request.merged == true`) +- The merged PR is linked to an issue with "Good First Issue" or "beginner" labels +- The linked issue is not labeled as "intermediate" or "advanced" + +## Recommendation Logic + +### Issue Detection + +The bot parses the pull request body to find linked issues using regex patterns that match: +- `Fixes #ISSUE_NUMBER` +- `Closes #ISSUE_NUMBER` +- `Resolves #ISSUE_NUMBER` +- `Fix #ISSUE_NUMBER` +- `Close #ISSUE_NUMBER` +- `Resolve #ISSUE_NUMBER` + +### Recommendation Strategy + +1. **For Good First Issue completers**: + - First searches for unassigned issues with "beginner" label + - Falls back to unassigned "Good First Issue" issues if no beginner issues found + +2.
**For beginner issue completers**: + - Searches for unassigned issues with "beginner" label + - Falls back to unassigned "Good First Issue" issues if no beginner issues found + +3. **Fallback behavior**: + - If no repository issues are available, provides link to organization-wide good first issues + - Limits recommendations to up to 5 issues to avoid overwhelming contributors + +## Comment Content + +The bot posts a congratulatory comment that includes: + +- **Congratulations message**: Thank you and encouragement for the contribution +- **Recommended issues**: List of up to 5 relevant issues with: + - Issue title and direct link + - Brief description (truncated to 150 characters) +- **Repository engagement**: + - Direct link to star the repository + - Direct link to watch the repository for notifications +- **Community resources**: Link to Discord community for questions +- **Fallback message**: Organization-wide good first issues link if no repo issues available + +### Example Comment Structure + +```markdown + + +🎉 **Congratulations on your first merged contribution!** + +Thank you for your contribution to the Hiero Python SDK! We're excited to have you as part of our community. + +Here are some beginner-level issues you might be interested in working on next: + +1. [Issue Title](https://github.com/owner/repo/issues/123) + Brief description of the issue... + +2. [Another Issue](https://github.com/owner/repo/issues/456) + Another brief description... + +🌟 **Stay connected with the project:** +- ⭐ [Star this repository](https://github.com/owner/repo) +- 👀 [Watch this repository](https://github.com/owner/repo/watchers) + +We look forward to seeing more contributions from you! If you have any questions, feel free to ask in our [Discord community](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md). 
+ +From the Hiero Python SDK Team 🚀 +``` + +## Idempotent Behavior + +The bot includes duplicate prevention by: + +- Wrapping comments with an HTML comment marker (e.g. `<!-- next-issue-recommendation-bot -->`) +- Checking existing PR comments for the marker before posting +- Skipping if a comment already exists +- This ensures only one recommendation comment per PR + +## Technical Implementation + +### Workflow File + +**Location**: `.github/workflows/bot-next-issue-recommendation.yml` + +**Key features**: +- Uses pinned action versions per project conventions +- Minimal permissions (`pull-requests: write`, `issues: read`, `contents: read`) +- Concurrency control to prevent duplicate runs + +### Script File + +**Location**: `.github/scripts/bot-next-issue-recommendation.js` + +**Key components**: +- GitHub REST API integration for issue and comment operations +- Regex parsing for linked issue detection +- Search queries with label filtering +- Error handling and logging + +## Testing + +The bot can be tested through: + +1. **Fork testing**: Test workflow behavior in forks before production deployment +2. **GitHub CLI testing**: Local testing with appropriate environment variables + +**Note**: The workflow only triggers automatically when PRs are merged. Manual testing requires creating test PRs and merging them in a test environment. + +## Permissions + +The workflow requires minimal permissions: +- `pull-requests: write` - To post comments on pull requests +- `issues: read` - To fetch issue details and search for recommendations +- `contents: read` - To access the script file + +## Troubleshooting + +### Common Issues + +1. **No linked issues found**: Ensure PR body contains "Fixes #123" or similar pattern +2. **Permission errors**: Verify workflow has required permissions +3. **Rate limiting**: GitHub API limits are handled by the GitHub Actions runner +4.
**Duplicate comments**: Bot checks for existing markers to prevent duplicates + +### Debug Information + +The bot provides detailed logging: +- PR number and dry-run status +- Linked issue detection results +- Issue labels found +- Search queries used +- Number of recommended issues found +- Comment posting status + +## Future Enhancements + +Potential improvements to consider: +- Support for custom recommendation criteria +- Integration with contributor statistics +- Personalized recommendations based on contribution history +- Support for multiple issue linking patterns +- Analytics on bot effectiveness and contributor retention diff --git a/docs/sdk_developers/merge_conflicts.md b/docs/sdk_developers/merge_conflicts.md index 1f37c7392..3ba6d459c 100644 --- a/docs/sdk_developers/merge_conflicts.md +++ b/docs/sdk_developers/merge_conflicts.md @@ -7,7 +7,8 @@ Merge conflicts are caused by working on out-dated versions of the codebase, or ## Table of Contents - [Handling Conflicts](#handling-conflicts) -- [Step by Step Guide to Resolve Merge Conflicts](#step-by-step-guide-to-resolve-merge-conflicts) +- [Resolving CHANGELOG.md Conflicts Using GitHub's Web Editor](#resolving-changelogmd-conflicts-using-githubs-web-editor) +- [Step-by-Step Guide to Resolve Merge Conflicts Locally](#step-by-step-guide-to-resolve-merge-conflicts-locally) - [1. See which files are conflicted](#1-see-which-files-are-conflicted) - [2. Understand what conflicts](#2-understand-what-conflicts) - [3. 
Decide what the final code should be](#3-decide-what-the-final-code-should-be) @@ -24,13 +25,55 @@ Merge conflicts are caused by working on out-dated versions of the codebase, or - [Common issues](#common-issues) - [If you need to stop](#if-you-need-to-stop) - [What NOT to do](#what-not-to-do) + - [When to Use This Method](#when-to-use-this-method) + - [Steps](#steps) - [Recovery Tips](#recovery-tips) - [If you are completely stuck](#if-you-are-completely-stuck) - [Helpful Resources](#helpful-resources) +## Resolving CHANGELOG.md Conflicts Using GitHub's Web Editor -## Step by Step Guide to Resolve Merge Conflicts +Changelog conflicts in CHANGELOG.md are very common in this repository because multiple pull requests frequently add new entries to the "Unreleased" section at the same time. + +For these conflicts, there is a much simpler alternative to the full local rebase process: you can resolve them directly in your Pull Request on GitHub using the built-in web editor. No local setup or force push is required. + +**Strong Recommendation**: Use the local VS Code rebase method (described below) to ensure full signing compliance. Use the web editor only if you are prepared to amend and re-sign the resolution commit locally afterward. + +### When to Use This Method + +- The conflict is only (or primarily) in `CHANGELOG.md`. +- You want to keep both sets of changes — this is almost always the correct choice for changelogs, as they are designed to accumulate all entries over time. + +**Important**: The warnings above about not blindly accepting changes apply to code files. For CHANGELOG.md, "accepting both" is safe and recommended. + +### Steps + +1. In your PR, go to **Files changed** and scroll to the bottom. In the merge‑conflict banner, click **Resolve conflicts** to open the web editor. +2. In GitHub's web-based editor: + - Conflicted sections are highlighted (usually in red/yellow). 
+ - You'll see **Incoming change** (from main) and **Current change** (from your branch) marked in red/highlighted areas. + +3. **For a changelog conflict**: + - Click **Accept both** (if the button is available) — this combines all the bullet-point entries perfectly. + - If no "Accept both" button appears, manually remove the conflict markers (`<<<<<<< HEAD`, `=======`, `>>>>>>> your-branch`) and keep all entries from both sides. + - Optionally, sort or group similar entries for neatness. + +4. Once all conflicts are resolved, click **Mark as resolved** at the top of the editor. + +5. You'll see a **Sign off and Commit** section: + - Review your commit message + - Click **Commit merge** (or **Commit directly to...** depending on your GitHub UI). + +6. Your PR will update automatically, and the conflict will be resolved. + +This method is especially beginner-friendly and avoids the more advanced local rebase steps. + +For more details on GitHub's web-based editor, see: [The GitHub.dev web-based editor](https://docs.github.com/en/codespaces/the-githubdev-web-based-editor). +**Example**: See [PR `#1589`](https://github.com/hiero-ledger/hiero-sdk-python/pull/1589) for a real example of resolving a CHANGELOG.md conflict using this method. + + +## Step-by-Step Guide to Resolve Merge Conflicts Locally ### 1. 
See which files are conflicted ```bash diff --git a/docs/sdk_developers/setup.md b/docs/sdk_developers/setup.md index e7db3a4d2..545765a1f 100644 --- a/docs/sdk_developers/setup.md +++ b/docs/sdk_developers/setup.md @@ -108,7 +108,7 @@ brew install uv **On Windows:** ```powershell -powershell -c "irm https://astral.sh/uv/install.ps1 | iex" +powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex" ``` **Other installation methods:** [uv Installation Guide](https://docs.astral.sh/uv/getting-started/installation/) diff --git a/docs/sdk_developers/training/network_and_client.md b/docs/sdk_developers/training/network_and_client.md index 310b2b57e..3604e00ba 100644 --- a/docs/sdk_developers/training/network_and_client.md +++ b/docs/sdk_developers/training/network_and_client.md @@ -94,7 +94,7 @@ Each network has its default mirror node: | mainnet | `https://mainnet-public.mirrornode.hedera.com` | | testnet | `https://testnet.mirrornode.hedera.com` | | previewnet | `https://previewnet.mirrornode.hedera.com` | -| solo | `http://localhost:8080` | +| solo | `http://localhost:5551` | --- diff --git a/docs/sdk_developers/training/protoBuf-training/ProtoBuf-Training.md b/docs/sdk_developers/training/protoBuf-training/ProtoBuf-Training.md new file mode 100644 index 000000000..ba8d29aba --- /dev/null +++ b/docs/sdk_developers/training/protoBuf-training/ProtoBuf-Training.md @@ -0,0 +1,235 @@ +# Protobuf Training for SDK Developers + +This training is a **linear, explanatory course** designed for SDK developers who may have **little or no prior experience with Protocol Buffers (protobufs)**. + +Its purpose is to explain *what protobufs are*, *why they exist*, and *how they are used in practice* inside the Hedera / Hiero SDK ecosystem. By the end of the course, you should not only be able to follow protobuf-based code, but also reason about it, debug it, and confidently work with it directly when necessary. 
+ +The course progresses from general concepts to Hedera‑specific details, and finishes with a complete, end‑to‑end worked example that mirrors how the SDK interacts with the Hedera network internally. + +--- + +## Module 01: Google Protocol Buffers Fundamentals + +Before working with Hedera, it is important to understand protobufs on their own. + +Protocol Buffers are a **language-agnostic, strongly-typed, binary data format** created by Google. They are designed to solve problems that arise when systems need to exchange structured data reliably, efficiently, and in a way that can evolve over time without breaking compatibility. + +In practice, protobufs replace formats like JSON or XML in systems where performance, strict schemas, and backward compatibility matter. + +**Topics covered (with explanation):** + +* **What Protocol Buffers are and why they are used** + Protobufs define *what data is allowed* and *what shape it must have*. Unlike JSON, protobufs enforce structure at compile time and serialize into a compact binary form, making them faster and safer for network communication. + +* **`.proto` files and schema definitions** + A `.proto` file is the authoritative contract between systems. It defines messages, fields, and types. All generated code in every language comes from these files. + +* **`message` definitions and field numbering** + Messages describe structured data. Field numbers are not arbitrary — they are part of the binary encoding and must remain stable to preserve compatibility across versions. + +* **Scalar types vs message types** + Scalar fields represent primitive values (numbers, strings, booleans), while message fields allow nesting and composition of complex data structures. + +* **`oneof` fields** + A `oneof` enforces that only one field in a group may be set at a time. This is commonly used to model mutually exclusive options, such as different query or response types. 
+ +* **Binary serialization concepts** + Protobufs encode data into bytes using field numbers and wire types. You do not need to know the wire format, but understanding that serialization is deterministic and compact helps with debugging. + +**Goal:** +Build a strong conceptual understanding of protobufs as structured, version-safe data contracts. + +--- + +## Module 02: Hedera Protobuf Architecture + +Hedera exposes its entire public API using protobuf definitions. Every interaction with the network — queries, transactions, and responses — is described by protobuf messages. + +This module explains how those messages are organized and why Hedera uses a layered envelope approach. + +**Topics covered (with explanation):** + +* **Hedera service definitions** + Services group related functionality (for example, crypto, consensus, token). Each service defines the protobuf messages used for its operations. + +* **Query and Response envelope pattern** + Rather than sending many different top-level message types, Hedera wraps all queries in a `Query` message and all replies in a `Response` message. + +* **Use of `oneof` for query and response selection** + The envelope uses `oneof` fields to indicate *which specific operation* is being requested or returned. + +* **Request / response pairing** + Every query message has a corresponding response message, and understanding this pairing is essential when navigating protobuf definitions. + +* **Headers and precheck status codes** + Response headers contain metadata about execution status before any business logic runs. These are critical when debugging failed requests. + +**Goal:** +Understand the structure of Hedera’s protobuf-based network API and how requests and responses are routed. + +--- + +## Module 03: Python Generated Protobuf Classes + +Protobuf schemas are not used directly at runtime. Instead, they are compiled into language-specific classes that enforce the schema rules. 
+ +This module focuses on how `.proto` definitions become **Python objects** and how those objects behave in real code. + +**Topics covered (with explanation):** + +* **Generated `*_pb2.py` files** + These files contain Python classes that mirror the message definitions in `.proto` files. They should not be edited manually. + +* **Message instantiation and default values** + Creating a protobuf object does not mean fields are "set". Unset fields have implicit defaults, which can affect logic if misunderstood. + +* **Nested messages** + Some fields are themselves messages. Understanding how to access and populate nested messages is critical for correct request construction. + +* **`CopyFrom` vs direct assignment** + Certain message fields must be populated using `CopyFrom` to preserve type safety and internal consistency. + +* **Inspecting and printing protobuf messages** + Protobuf objects can be printed for debugging, but the output reflects structure, not wire format. + +**Goal:** +Gain confidence working directly with Python-generated protobuf classes without relying on SDK abstractions. + +--- + +## Module 04: Converting Data *To* Protobuf Messages + +Before data can be sent to the Hedera network, it must be translated into a protobuf message that exactly matches the expected schema. + +This module explains how application-level data becomes a valid protobuf request. + +**Topics covered (with explanation):** + +* **Manually constructing protobuf messages** + Understanding how to create messages step by step helps when debugging SDK behavior or writing low-level tooling. + +* **Populating nested message fields** + Many Hedera messages contain nested structures that must be explicitly populated. + +* **Working with IDs and basic types** + Identifiers such as `AccountID` are protobuf messages themselves, not simple integers. + +* **Validating message completeness** + While protobufs allow unset fields, the network may reject incomplete or malformed requests. 
+ +**Goal:** +Understand how raw input data is translated into a network-ready protobuf message. + +--- + +## Module 05: Converting Data *From* Protobuf Messages + +When the network responds, it returns structured protobuf messages that must be interpreted correctly. + +This module explains how to safely extract and reason about response data. + +**Topics covered (with explanation):** + +* **Navigating nested protobuf structures** + Responses often contain deeply nested messages that must be accessed carefully. + +* **Reading scalar and message fields** + Understanding when a field is present versus when it is simply defaulted is critical. + +* **Unset fields and defaults** + Protobufs do not distinguish between "unset" and "set to default" in all cases, which can be surprising. + +* **Mapping protobufs to SDK objects** + SDKs typically convert protobuf responses into higher-level objects. Understanding this mapping aids debugging. + +**Goal:** +Correctly interpret network responses without misreading protobuf defaults or structure. + +--- + +## Module 06: Serializing Protobuf Messages + +Protobuf messages must be converted into bytes before they can be transmitted over the network. + +This module explains what serialization does and where it fits in the request lifecycle. + +**Topics covered (with explanation):** + +* **`SerializeToString()`** + This method converts a structured protobuf object into its binary wire representation. + +* **Binary encoding characteristics** + Serialized protobufs are compact and not human-readable, which is why logging must occur before serialization. + +* **Payload size considerations** + Field numbers, optional fields, and nesting all affect message size. + +* **Serialization in the SDK** + The SDK serializes messages immediately before network transmission. + +* **Common serialization mistakes** + Serializing incomplete messages or modifying messages after serialization can cause subtle bugs. 
+ +**Goal:** +Understand how and when protobuf messages become raw network bytes. + +--- + +## Module 07: Deserializing Protobuf Messages + +Deserialization is the process of reconstructing structured protobuf objects from raw bytes received from the network. + +This module explains how incoming data becomes usable again. + +**Topics covered (with explanation):** + +* **`ParseFromString()`** + This method populates a protobuf object from binary data. + +* **Reconstructing message trees** + Nested structures are rebuilt automatically based on the schema. + +* **Error handling during parsing** + Corrupt or unexpected data can cause parsing failures. + +* **Validation after deserialization** + Even successfully parsed messages should be checked for expected content. + +**Goal:** +Safely reconstruct and validate protobuf messages received from the network. + +--- + +## Module 08: Worked Example (End‑to‑End) + +The final module brings together everything learned in the previous modules. + +Rather than introducing new concepts, it demonstrates how all of the pieces work together in a realistic scenario. + +**What the example demonstrates:** + +* Manually constructing a Hedera query protobuf +* Wrapping the query in a Hedera `Query` envelope +* Serializing the query into bytes +* Mocking a Hedera network response +* Serializing and deserializing the response +* Extracting SDK‑level data from the decoded protobuf + +**Goal:** +See how protobufs function across the full lifecycle of a Hedera SDK request. 
+ +**Resources:** +[Worked Example Documentation](ProtoBuf_Example.md) +[Runnable Python Script](../../../../examples/protobuf_round_trip.py) + +--- + +## Next Steps + +After completing this training, developers should be able to: + +* Understand what protobufs are and why Hedera uses them +* Debug protobuf‑related SDK issues with confidence +* Manually construct and inspect Hedera protobuf messages +* Understand how SDK abstractions map to raw protobuf data +* Reason clearly about serialization and deserialization boundaries diff --git a/docs/sdk_developers/training/protoBuf-training/ProtoBuf_Example.md b/docs/sdk_developers/training/protoBuf-training/ProtoBuf_Example.md new file mode 100644 index 000000000..6ebe28b68 --- /dev/null +++ b/docs/sdk_developers/training/protoBuf-training/ProtoBuf_Example.md @@ -0,0 +1,89 @@ +# Worked Example: Hedera Protobuf Round Trip + +This worked example demonstrates a complete protobuf request/response lifecycle +as used internally by the Hedera SDK. + +The accompanying Python script (`examples/protobuf_round_trip.py`) is fully runnable and +contains no instructional narration — all explanation is contained here. + +--- + +## Overview of the Flow + +This example walks through the following steps: + +1. Constructing a `CryptoGetInfoQuery` protobuf message +2. Wrapping it in a Hedera `Query` envelope using a `oneof` +3. Serializing the query into bytes +4. Deserializing the bytes back into a protobuf message +5. Mocking a Hedera `Response` +6. Serializing and deserializing the response +7. Extracting account data from the decoded response + +This mirrors how the SDK constructs, sends, and interprets network messages. + +--- + +## Step 1: Building the CryptoGetInfoQuery + +A `CryptoGetInfoQuery` requires an `AccountID`, which itself is a protobuf +message. This reinforces an important concept: identifiers in Hedera are +structured types, not primitives. + +The query message is populated and then embedded inside a `Query` envelope. 
+ +--- + +## Step 2: Query Envelope and `oneof` + +Hedera uses a single top-level `Query` message that contains a `oneof` +representing all possible query types. + +Setting `query.cryptoGetInfo` automatically selects the active query type +within the envelope. + +--- + +## Step 3: Serialization + +Before transmission, protobuf messages are serialized into a compact binary +format using `SerializeToString()`. + +This binary payload is what would be sent over the network. + +--- + +## Step 4: Deserialization + +The serialized bytes are parsed back into a protobuf object using +`ParseFromString()`, reconstructing the full message tree. + +--- + +## Step 5: Mocking a Network Response + +To simulate a network round-trip, the example constructs a +`CryptoGetInfoResponse` inside a `Response` envelope. + +Only a subset of fields are populated to demonstrate default handling. + +--- + +## Step 6: Interpreting the Response + +Finally, the decoded response is traversed and relevant account information +is extracted, similar to how the SDK converts protobuf responses into +higher-level objects. + +--- + +## Why This Example Matters + +Understanding this flow allows SDK developers to: + +- Debug protobuf serialization issues +- Reason about `oneof` selection +- Inspect raw network payloads +- Understand SDK abstraction boundaries + +The full runnable implementation can be found in `protobuf_round_trip.py`. diff --git a/docs/sdk_developers/training/setup/setup_windows.md b/docs/sdk_developers/training/setup/setup_windows.md new file mode 100644 index 000000000..f4435b3f5 --- /dev/null +++ b/docs/sdk_developers/training/setup/setup_windows.md @@ -0,0 +1,95 @@ +# Windows Setup Guide + +This guide provides a step-by-step walkthrough for setting up the Hiero Python SDK development environment specifically for Windows users. We will use PowerShell and `uv` for dependency management. 
+
+---
+
+## Table of Contents
+- [Prerequisites](#prerequisites)
+- [Fork and Clone](#fork-and-clone)
+- [Install uv](#install-uv)
+- [Install Dependencies](#install-dependencies)
+- [Generate Protobufs](#generate-protobufs)
+- [Troubleshooting](#troubleshooting)
+
+---
+
+## Prerequisites
+
+Before you begin, ensure you have the following installed on your system:
+
+1. **Git for Windows**: [Download and install Git](https://gitforwindows.org/).
+2. **Python 3.10+**: [Download and install Python](https://www.python.org/downloads/windows/). Ensure "Add Python to PATH" is checked during installation.
+3. **GitHub Account**: You will need a GitHub account to fork the repository.
+
+---
+
+## Fork and Clone
+
+1. Navigate to the [hiero-sdk-python repository](https://github.com/hiero-ledger/hiero-sdk-python) and click the **Fork** button.
+2. Open **PowerShell** and run the following commands to clone your fork:
+
+```powershell
+# Clone the repository
+git clone https://github.com/<your-username>/hiero-sdk-python.git
+
+# Navigate into the project directory
+cd hiero-sdk-python
+```
+---
+
+## Install uv
+
+The Hiero Python SDK uses `uv` for extremely fast Python package and environment management.
+
+1. In your PowerShell window, run the following command to install `uv`:
+
+```powershell
+powershell -ExecutionPolicy ByPass -c "irm https://astral.sh/uv/install.ps1 | iex"
+```
+
+> ⚠️ **Important**: After the installation finishes, you **must** close your current PowerShell window and open a new one for the changes to take effect. Alternatively, you can reload your environment variables.
+
+2. Verify the installation by running:
+```powershell
+uv --version
+```
+
+---
+
+## Install Dependencies
+
+Once `uv` is installed and you are inside the project directory, run:
+
+```powershell
+uv sync
+```
+
+This command will create a virtual environment and install all necessary development dependencies automatically.
+ +--- + +## Generate Protobufs + +The SDK requires generated protobuf files to communicate with the network. Run the following command to generate them: + +```powershell +uv run python generate_proto.py +``` + +--- + +## Troubleshooting + +### `uv` is not recognized +If you receive an error stating that `uv` is not recognized as a cmdlet or function, ensure that the installation path (typically `%USERPROFILE%\.local\bin`) is added to your Windows Environment Variables (PATH). + +### Execution Policy Restrictions +If you encounter errors running scripts in PowerShell, you may need to adjust your execution policy. Run PowerShell as an Administrator and execute: +```powershell +Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser +``` + +### Git Bash Alternative +While this guide focuses on PowerShell, you can also use **Git Bash**. If using Git Bash, follow the [Standard Setup Guide](02_installing_hiero_python_sdk.md) as it behaves similarly to a Unix shell. + diff --git a/examples/account/account_id_populate_from_mirror.py b/examples/account/account_id_populate_from_mirror.py new file mode 100644 index 000000000..463787373 --- /dev/null +++ b/examples/account/account_id_populate_from_mirror.py @@ -0,0 +1,144 @@ +""" +uv run examples/account/account_id_populate_from_mirror.py +python examples/account/account_id_populate_from_mirror.py + +This example demonstrates how to populate AccountId fields +using mirror node lookups: + +1. Create an AccountId from an EVM address +2. Trigger auto account creation via HBAR transfer +3. Populate account number (num) from mirror node +4. Populate EVM address from mirror node +""" + +import sys +import time + +from hiero_sdk_python import ( + Client, + AccountId, + PrivateKey, + TransferTransaction, + Hbar, + TransactionGetReceiptQuery, +) + + +def generate_evm_address(): + """ + Generates a new ECDSA key pair and returns its EVM address. 
+ """ + private_key = PrivateKey.generate_ecdsa() + return private_key.public_key().to_evm_address() + + +def auto_create_account(client, evm_address): + """ + Triggers auto account creation by transferring HBAR + to an EVM address. + """ + print("\nAuto Account Creation...") + + try: + evm_account_id = AccountId.from_evm_address(evm_address, 0, 0) + + transfer_tx = ( + TransferTransaction() + .add_hbar_transfer(evm_account_id, Hbar(1).to_tinybars()) + .add_hbar_transfer(client.operator_account_id, Hbar(-1).to_tinybars()) + .execute(client) + ) + + receipt = ( + TransactionGetReceiptQuery() + .set_transaction_id(transfer_tx.transaction_id) + .set_include_children(True) + .execute(client) + ) + except Exception as e: + print(f"Failed during auto account creation tx: {e}") + sys.exit(1) + + if not receipt.children: + print("Auto account creation failed: no child receipts found") + sys.exit(1) + + account_id = receipt.children[0].account_id + print(f"Auto-created account: {account_id}") + return account_id + + +def populate_account_num_example(client, evm_address, created_account_id): + """ + Demonstrates populating AccountId.num from the mirror node. 
+ """ + print("\nExample 1: Populate Account Number from Mirror Node...") + + mirror_account_id = AccountId.from_evm_address(evm_address, 0, 0) + print(f"Before populate: num = {mirror_account_id.num}") + + time.sleep(5) + + try: + new_account_id = mirror_account_id.populate_account_num(client) + except Exception as e: + print(f"Failed to populate account number: {e}") + sys.exit(1) + + print("After populate:") + print(f" Shard: {new_account_id.shard}") + print(f" Realm: {new_account_id.realm}") + print(f" Num: {new_account_id.num}") + + if new_account_id.num != created_account_id.num: + print( + "Account number mismatch:\n" + f" Expected: {created_account_id.num}\n" + f" Got: {new_account_id.num}" + ) + sys.exit(1) + + +def populate_evm_address_example(client, created_account_id, evm_address): + """ + Demonstrates populating AccountId.evm_address from the mirror node. + """ + print("\nExample 2: Populate EVM Address from Mirror Node") + + print(f"Before populate: evm_address = {created_account_id.evm_address}") + + time.sleep(5) + + try: + new_account_id = created_account_id.populate_evm_address(client) + except Exception as e: + print(f"Failed to populate EVM address: {e}") + sys.exit(1) + + print(f"After populate: evm_address = {new_account_id.evm_address}") + + if new_account_id.evm_address != evm_address: + print( + "EVM address mismatch:\n" + f" Expected: {evm_address}\n" + f" Got: {new_account_id.evm_address}" + ) + sys.exit(1) + + +def main(): + client = Client.from_env() + + print(f"Client set up with operator id {client.operator_account_id}") + + evm_address = generate_evm_address() + print(f"Generated EVM address: {evm_address}") + + created_account_id = auto_create_account(client, evm_address) + + populate_account_num_example(client, evm_address, created_account_id) + populate_evm_address_example(client, created_account_id, evm_address) + + +if __name__ == "__main__": + main() diff --git a/examples/consensus/topic_create_transaction.py 
b/examples/consensus/topic_create_transaction.py index 38831cccf..553c729a7 100644 --- a/examples/consensus/topic_create_transaction.py +++ b/examples/consensus/topic_create_transaction.py @@ -2,56 +2,17 @@ uv run examples/consensus/topic_create_transaction.py python examples/consensus/topic_create_transaction.py """ +from hiero_sdk_python import Client, TopicCreateTransaction, ResponseCode, PrivateKey -import os -import sys -from typing import Tuple -from dotenv import load_dotenv - -from hiero_sdk_python import ( - Client, - AccountId, - PrivateKey, - TopicCreateTransaction, - Network, -) - -# Load environment variables from .env file -load_dotenv() -network_name = os.getenv("NETWORK", "testnet").lower() - - -def setup_client() -> Tuple[Client, PrivateKey]: +def setup_client(): """ - Sets up and configures the Hiero client for the testnet. - Reads OPERATOR_ID and OPERATOR_KEY from environment variables. + Sets up and configures the Hiero client. + Reads OPERATOR_ID and OPERATOR_KEY from environment variables via Client.from_env(). 
""" - network = Network(network_name) - print(f"Connecting to Hedera {network_name} network!") - client = Client(network) - - operator_id_str = os.getenv("OPERATOR_ID") - operator_key_str = os.getenv("OPERATOR_KEY") - - # Check if the environment variables are loaded correctly - if not operator_id_str or not operator_key_str: - print("Error: OPERATOR_ID or OPERATOR_KEY not found in environment.") - print("Please create a .env file in the project's root directory with:") - print("\nOPERATOR_ID=your_id_here") - print("OPERATOR_KEY=your_key_here\n") - sys.exit(1) - - try: - operator_id = AccountId.from_string(operator_id_str) - operator_key = PrivateKey.from_string(operator_key_str) - except (TypeError, ValueError) as e: - print(f"Error: Invalid OPERATOR_ID or OPERATOR_KEY format: {e}") - sys.exit(1) - - client.set_operator(operator_id, operator_key) + client = Client.from_env() + print(f"Network: {client.network.network}") print(f"Client set up with operator id {client.operator_account_id}") - return client, operator_key - + return client, client.operator_private_key def create_topic(client: Client, operator_key: PrivateKey): """ @@ -64,18 +25,18 @@ def create_topic(client: Client, operator_key: PrivateKey): .freeze_with(client) .sign(operator_key) ) - try: receipt = transaction.execute(client) - if receipt and receipt.topic_id: - print(f"Success! Topic created with ID: {receipt.topic_id}") - else: + if receipt.status != ResponseCode.SUCCESS: + print(f"Topic creation failed: {ResponseCode(receipt.status).name}") + raise SystemExit(1) + if not receipt.topic_id: print("Topic creation failed: Topic ID not returned in receipt.") - sys.exit(1) + raise SystemExit(1) + print(f"Success! 
Topic created with ID: {receipt.topic_id}") except Exception as e: print(f"Topic creation failed: {str(e)}") - sys.exit(1) - + raise SystemExit(1) def main(): """ @@ -84,6 +45,5 @@ def main(): client, operator_key = setup_client() create_topic(client, operator_key) - if __name__ == "__main__": - main() + main() \ No newline at end of file diff --git a/examples/protobuf_round_trip.py b/examples/protobuf_round_trip.py new file mode 100644 index 000000000..c804c935d --- /dev/null +++ b/examples/protobuf_round_trip.py @@ -0,0 +1,100 @@ +""" +Worked Example: Hedera Protobuf Round Trip + +This script demonstrates constructing, serializing, and deserializing +Hedera protobuf messages without connecting to the network. It shows: +- Building a CryptoGetInfoQuery with an AccountID +- Wrapping queries in Hedera's Query envelope +- Serializing and parsing protobuf messages +- Extracting data from parsed responses + +This example accompanies the ProtoBuff Training documentation. +""" +from hiero_sdk_python.hapi.services import ( + query_pb2, + crypto_get_info_pb2, + basic_types_pb2, + response_pb2, +) + + +def build_crypto_get_info_query(account_num: int) -> query_pb2.Query: + crypto_info = crypto_get_info_pb2.CryptoGetInfoQuery() + + account_id = basic_types_pb2.AccountID( + shardNum=0, + realmNum=0, + accountNum=account_num, + ) + + crypto_info.accountID.CopyFrom(account_id) + + query = query_pb2.Query() + query.cryptoGetInfo.CopyFrom(crypto_info) + + return query + + +def serialize_and_parse_query(query: query_pb2.Query) -> query_pb2.Query: + serialized = query.SerializeToString() + + parsed = query_pb2.Query() + parsed.ParseFromString(serialized) + + return parsed + + +def mock_crypto_get_info_response(account_num: int) -> response_pb2.Response: + response = response_pb2.Response() + crypto_resp = response.cryptoGetInfo + + crypto_resp.header.nodeTransactionPrecheckCode = 0 + + crypto_resp.accountInfo.accountID.CopyFrom( + basic_types_pb2.AccountID( + shardNum=0, + 
realmNum=0, + accountNum=account_num, + ) + ) + + crypto_resp.accountInfo.balance = 100_000 + crypto_resp.accountInfo.deleted = False + + return response + + +def serialize_and_parse_response( + response: response_pb2.Response, +) -> response_pb2.Response: + serialized = response.SerializeToString() + + parsed = response_pb2.Response() + parsed.ParseFromString(serialized) + + return parsed + + +def main(): + query = build_crypto_get_info_query(account_num=1234) + parsed_query = serialize_and_parse_query(query) + + print("Parsed Query:") + print(parsed_query) + + response = mock_crypto_get_info_response(account_num=1234) + parsed_response = serialize_and_parse_response(response) + + print("\nParsed Response:") + print(parsed_response) + + account_info = parsed_response.cryptoGetInfo.accountInfo + + print("\nExtracted Account Info:") + print("Account ID:", account_info.accountID.accountNum) + print("Balance:", account_info.balance) + print("Deleted:", account_info.deleted) + + +if __name__ == "__main__": + main() diff --git a/examples/tokens/token_freeze_transaction.py b/examples/tokens/token_freeze_transaction.py index 6698b0d84..ed755631f 100644 --- a/examples/tokens/token_freeze_transaction.py +++ b/examples/tokens/token_freeze_transaction.py @@ -1,11 +1,11 @@ -# uv run examples/tokens/token_freeze.py -# python examples/tokens/token_freeze.py """ Creates a freezeable token and demonstrates freezing and unfreezing the token for the operator (treasury) account. 
+ uv run examples/tokens/token_freeze_transaction.py python examples/tokens/token_freeze_transaction.py """ + import os import sys @@ -19,12 +19,17 @@ ) - def setup_client(): + """Setup client from environment variables""" client = Client.from_env() + operator_id = client.operator_account_id + operator_key = client.operator_private_key + print(f"Network: {client.network.network}") - print(f"Client set up with operator id {client.operator_account_id}") - return client + print(f"Client set up with operator id {operator_id}") + + return client, operator_id, operator_key + def generate_freeze_key(): """Generate a Freeze Key""" @@ -38,6 +43,7 @@ def create_freezeable_token(client, operator_id, operator_key): """Create a token with the freeze key""" freeze_key = generate_freeze_key() print("\nSTEP 2: Creating a new freezeable token...") + try: tx = ( TokenCreateTransaction() @@ -45,52 +51,53 @@ def create_freezeable_token(client, operator_id, operator_key): .set_token_symbol("FRZ") .set_initial_supply(1000) .set_treasury_account_id(operator_id) - .set_freeze_key(freeze_key) # <-- THE FIX: Pass the private key directly + .set_freeze_key(freeze_key) ) - # Freeze, sign with BOTH operator and the new freeze key, then execute receipt = ( tx.freeze_with(client) .sign(operator_key) - .sign(freeze_key) # The new freeze key must sign to give consent + .sign(freeze_key) .execute(client) ) + token_id = receipt.token_id print(f"✅ Success! Created token with ID: {token_id}") + return freeze_key, token_id, client, operator_id, operator_key - except RuntimeError as e: + + except Exception as e: print(f"❌ Error creating token: {e}") sys.exit(1) def freeze_token(token_id, client, operator_id, freeze_key): - """ - Freeze the token for the operator account. 
- """ + """Freeze the token for the operator account""" print(f"\nSTEP 3: Freezing token {token_id} for operator account {operator_id}...") + try: receipt = ( TokenFreezeTransaction() .set_token_id(token_id) - .set_account_id(operator_id) # Target the operator account + .set_account_id(operator_id) .freeze_with(client) - .sign(freeze_key) # Must be signed by the freeze key + .sign(freeze_key) .execute(client) ) + print( f"✅ Success! Token freeze complete. Status: {ResponseCode(receipt.status).name}" ) - except RuntimeError as e: + except Exception as e: print(f"❌ Error freezing token: {e}") sys.exit(1) def verify_freeze(token_id, client, operator_id, operator_key): - """Attempt a token transfer to confirm the account - cannot perform the operation while frozen.""" + """Attempt a token transfer to confirm the account is frozen""" print("\nVerifying freeze: Attempting token transfer...") - # Try to transfer 1 token from operator to itself (should fail if frozen) + try: transfer_receipt = ( TransferTransaction() @@ -100,10 +107,10 @@ def verify_freeze(token_id, client, operator_id, operator_key): .sign(operator_key) .execute(client) ) - # Handle status code 165 (ACCOUNT_FROZEN_FOR_TOKEN) and print a clear message - status_code = transfer_receipt.status - status_name = ResponseCode(status_code).name - if status_name in ["ACCOUNT_FROZEN_FOR_TOKEN", "ACCOUNT_FROZEN"]: + + status_name = ResponseCode(transfer_receipt.status).name + + if status_name == "ACCOUNT_FROZEN_FOR_TOKEN": print( f"✅ Verified: Transfer blocked as expected due to freeze. Status: {status_name}" ) @@ -112,9 +119,11 @@ def verify_freeze(token_id, client, operator_id, operator_key): "❌ Error: Transfer succeeded, but should have failed because the account is frozen." ) else: - print(f"❌ Unexpected: Transfer result. Status: {status_name}") - except RuntimeError as e: - print(f"✅ Verified: Transfer failed as expected due to freeze. Error: {e}") + print(f"❌ Unexpected transfer result. 
Status: {status_name}") + + except Exception as e: + print(f"❌ Error during transfer verification: {e}") + sys.exit(1) def main(): @@ -122,18 +131,16 @@ def main(): 1. Create a freezeable token with a freeze key. 2. Freeze the token for the operator account using the freeze key. 3. Attempt a token transfer to verify the freeze (should fail). - 4. Return token details for further operations.""" - - client = setup_client() - operator_id = client.operator_account_id - operator_key = client.operator_private_key + """ + client, operator_id, operator_key = setup_client() freeze_key, token_id, client, operator_id, operator_key = create_freezeable_token( client, operator_id, operator_key ) + freeze_token(token_id, client, operator_id, freeze_key) verify_freeze(token_id, client, operator_id, operator_key) if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/transaction/transaction_freeze_manually.py b/examples/transaction/transaction_freeze_manually.py new file mode 100644 index 000000000..be7e9a8f0 --- /dev/null +++ b/examples/transaction/transaction_freeze_manually.py @@ -0,0 +1,119 @@ +""" +Demonstrates how to manually freeze, serialize, deserialize, +sign, and execute a transaction using hiero_sdk_python. + +uv run examples/transaction/transaction_freeze_manually.py +python examples/transaction/transaction_freeze_manually.py +""" +import os +import sys +from dotenv import load_dotenv + +from hiero_sdk_python import ( + AccountId, + PrivateKey, + TopicCreateTransaction, + TransactionId, + Client, + Network, + Transaction, + ResponseCode +) + +load_dotenv() + +NETWORK_NAME = os.getenv("NETWORK", "testnet").lower() +OPERATOR_ID = os.getenv("OPERATOR_ID") +OPERATOR_KEY = os.getenv("OPERATOR_KEY") +NODE_ACCOUNT_ID = AccountId.from_string("0.0.3") + +def setup_client(): + """ + Initialize and return a Hedera Client using operator credentials. 
+ """ + if not OPERATOR_ID or not OPERATOR_KEY: + raise RuntimeError("OPERATOR_ID or OPERATOR_KEY not set in .env") + + print(f"Connecting to Hedera {NETWORK_NAME} network!") + + try: + client = Client(Network(NETWORK_NAME)) + + operator_id = AccountId.from_string(OPERATOR_ID) + operator_key = PrivateKey.from_string(OPERATOR_KEY) + + client.set_operator(operator_id, operator_key) + + except Exception as exc: + raise RuntimeError(f"Failed to initialize client: {exc}") from exc + + print(f"Client initialized with operator {client.operator_account_id}") + return client + +def build_unsigned_tx(executor_client): + """ + Build a Transaction, manually freeze it for a specific node, and return serialized unsigned bytes. + """ + tx_id = TransactionId.generate(executor_client.operator_account_id) + + tx = ( + TopicCreateTransaction() + .set_memo("Test Topic Creation") + .set_transaction_id(tx_id) + ) + + # Explicit node binding (important for deterministic freeze) + tx.node_account_id = NODE_ACCOUNT_ID + + # Freeze generates a body for ONLY the specified node + tx.freeze() + + print(f"Transaction frozen for node {NODE_ACCOUNT_ID}") + return tx.to_bytes() + +def sign_and_execute(unsigned_bytes, executor_client): + """ + Deserialize, sign, and execute a transaction. + """ + try: + # Deserialize + tx = Transaction.from_bytes(unsigned_bytes) + print("Transaction deserialized (unsigned).") + + # Sign with executor client private key + tx.sign(executor_client.operator_private_key) + print("Transaction signed.") + + receipt = tx.execute(executor_client) + + if receipt.status != ResponseCode.SUCCESS: + raise RuntimeError(f"Transaction failed with status: {ResponseCode(receipt.status).name}") + + print("Transaction executed successfully.") + print("Receipt:", receipt) + + except Exception as exc: + raise RuntimeError(f"Transaction execution failed: {exc}") from exc + +def main(): + """ + 1. Set up a client. + 2. 
Create a Transaction and explicitly: + - Set the TransactionId + - Set the NodeAccountId (e.g. 0.0.3) + - Call `freeze()` to build the TransactionBody for the specified node + - Serialize the unsigned transaction to bytes + 3. Deserialize the transaction from bytes, sign it, and execute it on the network. + """ + try: + client = setup_client() + unsigned_bytes = build_unsigned_tx(client) + sign_and_execute(unsigned_bytes, client) + + except Exception as exc: + print(f"Error: {exc}") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/examples/transaction/transaction_freeze_secondary_client.py b/examples/transaction/transaction_freeze_secondary_client.py new file mode 100644 index 000000000..146c520d0 --- /dev/null +++ b/examples/transaction/transaction_freeze_secondary_client.py @@ -0,0 +1,149 @@ +""" +Demonstrate Manually freezing using secondary client, serializing, deserializing, signing, +and executing a Hedera transaction using hiero_sdk_python. + +uv run examples/transaction/transaction_freeze_secondary_client.py +python examples/transaction/transaction_freeze_secondary_client.py +""" + +import os +import sys +from dotenv import load_dotenv + +from hiero_sdk_python import ( + AccountId, + PrivateKey, + TopicCreateTransaction, + TransactionId, + Client, + Network, + Transaction, + AccountCreateTransaction, + ResponseCode +) + + +load_dotenv() + +NETWORK_NAME = os.getenv("NETWORK", "testnet").lower() +OPERATOR_ID = os.getenv("OPERATOR_ID") +OPERATOR_KEY = os.getenv("OPERATOR_KEY") + + +def setup_client(): + """ + Initialize and return the primary Hedera client using operator credentials. 
+ """ + if not OPERATOR_ID or not OPERATOR_KEY: + raise RuntimeError("OPERATOR_ID or OPERATOR_KEY not set in .env") + + print(f"Connecting to Hedera {NETWORK_NAME} network!") + + try: + client = Client(Network(NETWORK_NAME)) + + operator_id = AccountId.from_string(OPERATOR_ID) + operator_key = PrivateKey.from_string(OPERATOR_KEY) + + client.set_operator(operator_id, operator_key) + + except Exception as exc: + raise RuntimeError(f"Failed to initialize client: {exc}") from exc + + print(f"Client initialized with operator {client.operator_account_id}") + return client + +def create_secondary_client(executor_client): + """ + Create a secondary account and client. + """ + private_key = PrivateKey.generate() + + receipt = ( + AccountCreateTransaction() + .set_key_without_alias(private_key) + .freeze_with(executor_client) + .sign(executor_client.operator_private_key) + .execute(executor_client) + ) + + account_id = receipt.account_id + print(f"Secondary account created: {account_id}") + + secondary_client = Client(Network(NETWORK_NAME)) + secondary_client.set_operator(account_id, private_key) + + return secondary_client + +def build_unsigned_bytes(executor_client, secondary_client): + """ + Build a TopicCreateTransaction, manually freeze it using a secondary client, + and return the serialized unsigned transaction bytes. + """ + tx_id = TransactionId.generate(executor_client.operator_account_id) + + tx = ( + TopicCreateTransaction() + .set_memo("Test Topic Creation") + .set_transaction_id(tx_id) + ) + + # Manually freeze the transaction using the secondary client + tx.freeze_with(secondary_client) + + unsigned_bytes = tx.to_bytes() + print(f"Transaction frozen and serialized ({len(unsigned_bytes)} bytes).") + + return unsigned_bytes + +def sign_and_execute(unsigned_bytes, executor_client): + """ + Deserialize a transaction from bytes, sign it using the executor client, + and execute it on the Hedera network. 
+    """
+    try:
+        tx = Transaction.from_bytes(unsigned_bytes)
+        print("Transaction deserialized (unsigned).")
+
+        tx.sign(executor_client.operator_private_key)
+        print("Transaction signed by executor.")
+
+        receipt = tx.execute(executor_client)
+        if receipt.status != ResponseCode.SUCCESS:
+            raise RuntimeError(f"Transaction failed with status: {ResponseCode(receipt.status).name}")
+
+        print("Transaction executed successfully.")
+        print("Receipt:", receipt)
+
+    except Exception as exc:
+        raise RuntimeError(f"Transaction execution failed: {exc}") from exc
+
+
+def main():
+    """
+    1. Setup an executor client.
+    2. Create a secondary client used to manually freeze a transaction.
+    3. Create a Transaction and explicitly:
+       - Set the TransactionId
+       - Call `freezeWith()` to build the TransactionBody for the specified node with secondary client
+       - Serialize the unsigned transaction to bytes
+    4. Deserialize the transaction from bytes, sign it, and execute it on the network.
+    """
+    try:
+        executor_client = setup_client()
+        secondary_client = create_secondary_client(executor_client)
+
+        unsigned_bytes = build_unsigned_bytes(
+            executor_client,
+            secondary_client,
+        )
+
+        sign_and_execute(unsigned_bytes, executor_client)
+
+    except Exception as exc:
+        print(f"Error: {exc}")
+        sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/examples/transaction/transaction_freeze_without_operator.py b/examples/transaction/transaction_freeze_without_operator.py
new file mode 100644
index 000000000..b84b52d69
--- /dev/null
+++ b/examples/transaction/transaction_freeze_without_operator.py
@@ -0,0 +1,135 @@
+"""
+Demonstrate manually freezing with client having no operator set,
+serializing, signing, and executing a transaction.
+ +uv run examples/transaction/transaction_freeze_without_operator.py +python examples/transaction/transaction_freeze_without_operator.py +""" +import os +import sys +from dotenv import load_dotenv + +from hiero_sdk_python import ( + AccountId, + PrivateKey, + TopicCreateTransaction, + TransactionId, + Client, + Network, + Transaction, + ResponseCode +) + + +load_dotenv() + +NETWORK_NAME = os.getenv("NETWORK", "testnet").lower() +OPERATOR_ID = os.getenv("OPERATOR_ID") +OPERATOR_KEY = os.getenv("OPERATOR_KEY") + + +def setup_client(): + """ + Initialize and return the primary Hedera client using operator credentials. + """ + if not OPERATOR_ID or not OPERATOR_KEY: + raise RuntimeError("OPERATOR_ID or OPERATOR_KEY not set in .env") + + print(f"Connecting to Hedera {NETWORK_NAME} network!") + + try: + client = Client(Network(NETWORK_NAME)) + + operator_id = AccountId.from_string(OPERATOR_ID) + operator_key = PrivateKey.from_string(OPERATOR_KEY) + + client.set_operator(operator_id, operator_key) + + except Exception as exc: + raise RuntimeError(f"Failed to initialize client: {exc}") from exc + + print(f"Client initialized with operator {client.operator_account_id}") + return client + + + +def create_client_without_operator(): + """ + Create a client without an operator. + """ + secondary_client = Client(Network(NETWORK_NAME)) + + return secondary_client + +def build_unsigned_bytes(executor_client, secondary_client): + """ + Build a TopicCreateTransaction, manually freeze it using a secondary client, + and return the serialized unsigned transaction bytes. 
+ """ + tx_id = TransactionId.generate(executor_client.operator_account_id) + + tx = ( + TopicCreateTransaction() + .set_memo("Test Topic Creation") + .set_transaction_id(tx_id) + ) + + # Manually freeze the transaction using the secondary client having no operator + tx.freeze_with(secondary_client) + + unsigned_bytes = tx.to_bytes() + print(f"Transaction frozen and serialized ({len(unsigned_bytes)} bytes).") + + return unsigned_bytes + +def sign_and_execute(unsigned_bytes, executor_client): + """ + Deserialize a transaction from bytes, sign it using the executor client, + and execute it on the Hedera network. + """ + try: + tx = Transaction.from_bytes(unsigned_bytes) + print("Transaction deserialized (unsigned).") + + tx.sign(executor_client.operator_private_key) + print("Transaction signed by executor.") + + receipt = tx.execute(executor_client) + if receipt.status != ResponseCode.SUCCESS: + raise RuntimeError(f"Transaction failed with status: {ResponseCode(receipt.status).name}") + + print("Transaction executed successfully.") + print("Receipt:", receipt) + + except Exception as exc: + raise RuntimeError(f"Transaction execution failed: {exc}") from exc + + +def main(): + """ + 1. Setup an executor client. + 2. Create a secondary client without an operator. + 3. Create a Transaction and explicitly: + - Set the TransactionId + - Call `freezeWith()` to build the TransactionBody for the specified node with client without operator + - Serialize the unsigned transaction to bytes + 4. Deserialize the transaction from bytes, sign it, and execute it on the network. 
+ """ + try: + executor_client = setup_client() + secondary_client = create_client_without_operator() + + unsigned_bytes = build_unsigned_bytes( + executor_client, + secondary_client, + ) + + sign_and_execute(unsigned_bytes, executor_client) + + except Exception as exc: + print(f"Error: {exc}") + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/src/hiero_sdk_python/__init__.py b/src/hiero_sdk_python/__init__.py index 27d7d74b4..10519418d 100644 --- a/src/hiero_sdk_python/__init__.py +++ b/src/hiero_sdk_python/__init__.py @@ -51,6 +51,7 @@ from .tokens.token_unpause_transaction import TokenUnpauseTransaction from .tokens.token_pause_transaction import TokenPauseTransaction from .tokens.token_airdrop_claim import TokenClaimAirdropTransaction +from .tokens.assessed_custom_fee import AssessedCustomFee # Transaction from .transaction.transaction import Transaction @@ -70,6 +71,7 @@ # Timestamp from .timestamp import Timestamp +from .staking_info import StakingInfo # Duration from .Duration import Duration @@ -201,6 +203,7 @@ "HbarTransfer", "TokenPauseTransaction", "TokenUnpauseTransaction", + "AssessedCustomFee", # Transaction "Transaction", @@ -245,6 +248,7 @@ "ResponseCode", "Timestamp", "Duration", + "StakingInfo", # File "FileCreateTransaction", diff --git a/src/hiero_sdk_python/account/account_id.py b/src/hiero_sdk_python/account/account_id.py index ee343dd4c..35535689a 100644 --- a/src/hiero_sdk_python/account/account_id.py +++ b/src/hiero_sdk_python/account/account_id.py @@ -3,14 +3,17 @@ """ import re -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional, Union +from hiero_sdk_python.crypto.evm_address import EvmAddress from hiero_sdk_python.crypto.public_key import PublicKey from hiero_sdk_python.hapi.services import basic_types_pb2 from hiero_sdk_python.utils.entity_id_helper import ( parse_from_string, validate_checksum, - format_to_string_with_checksum + format_to_string_with_checksum, + perform_query_to_mirror_node, 
+ to_solidity_address, ) if TYPE_CHECKING: @@ -18,6 +21,7 @@ ALIAS_REGEX = re.compile(r"^(0|[1-9]\d*)\.(0|[1-9]\d*)\.((?:[0-9a-fA-F][0-9a-fA-F])+)$") + class AccountId: """ Represents an account ID on the network. @@ -28,12 +32,17 @@ class AccountId: The standard format is `..`, e.g., `0.0.10`. In addition to the account number, the account component can also be an alias: - - An alias can be either a public key (ED25519 or ECDSA) - - The alias format is `..`, where `alias` is the public key + - An alias can be either a public key (ED25519 or ECDSA) or an EVM address (20 bytes) + - The alias format is `..`, where `alias` is the public key or evm address """ def __init__( - self, shard: int = 0, realm: int = 0, num: int = 0, alias_key: PublicKey = None + self, + shard: int = 0, + realm: int = 0, + num: int = 0, + alias_key: Optional[PublicKey] = None, + evm_address: Optional[EvmAddress] = None, ) -> None: """ Initialize a new AccountId instance. @@ -42,48 +51,140 @@ def __init__( realm (int): The realm number of the account. num (int): The account number. alias_key (PublicKey): The public key of the account. + evm_address (EvmAddress): The public evm_address of the account. """ self.shard = shard self.realm = realm self.num = num self.alias_key = alias_key + self.evm_address = evm_address self.__checksum: str | None = None @classmethod def from_string(cls, account_id_str: str) -> "AccountId": """ - Creates an AccountId instance from a string in the format 'shard.realm.num'. + Creates an AccountId instance from a string. + Supported formats: + - shard.realm.num + - shard.realm.num-checksum + - shard.realm. + - 0x-prefixed or raw 20-byte hex EVM address + + Args: + account_id_str (str): Account ID string + + Returns: + AccountId: An instance of AccountId + + Raises: + ValueError: If the string format is invalid """ if account_id_str is None or not isinstance(account_id_str, str): - raise ValueError(f"Invalid account ID string '{account_id_str}'. 
Expected format 'shard.realm.num'.") + raise TypeError( + f"account_id_str must be a string, got {type(account_id_str).__name__}." + ) + + if cls._is_evm_address(account_id_str): + # Detect EVM address input (raw 20-byte hex or 0x-prefixed). + # EVM addresses do not encode shard or realm information, so both + # values default to 0. The numeric account ID can later be resolved + # via the mirror node using populate_account_num(). + return cls.from_evm_address(account_id_str, 0, 0) try: shard, realm, num, checksum = parse_from_string(account_id_str) account_id: AccountId = cls( - shard=int(shard), - realm=int(realm), - num=int(num) + shard=int(shard), realm=int(realm), num=int(num) ) account_id.__checksum = checksum return account_id except Exception as e: alias_match = ALIAS_REGEX.match(account_id_str) - + if alias_match: shard, realm, alias = alias_match.groups() + alias_bytes = bytes.fromhex(alias) + + is_evm_address = len(alias_bytes) == 20 + + # num is set to 0 because the numeric account ID is unknown at creation time. + # It can later be populated via the mirror node using populate_account_num(). return cls( shard=int(shard), realm=int(realm), num=0, - alias_key=PublicKey.from_bytes(bytes.fromhex(alias)) + alias_key=( + PublicKey.from_bytes(alias_bytes) + if not is_evm_address + else None + ), + evm_address=( + EvmAddress.from_bytes(alias_bytes) if is_evm_address else None + ), ) - + raise ValueError( - f"Invalid account ID string '{account_id_str}'. Expected format 'shard.realm.num'." + f"Invalid account ID string '{account_id_str}'." + "Supported formats: " + "'shard.realm.num', " + "'shard.realm.num-checksum', " + "'shard.realm.', " + "or a 20-byte EVM address." ) from e + @classmethod + def from_evm_address( + cls, evm_address: Union[str, EvmAddress], shard: int, realm: int + ) -> "AccountId": + """ + Create an AccountId from an EVM address. 
+ In case shard and realm are unknown, they should be set to zero + + Args: + evm_address (Union[str, EvmAddress]): EVM address string or object + shard (int): Shard number + realm (int): Realm number + + Returns: + AccountId: An instance of AccountId + """ + if evm_address is None: + raise ValueError("evm_address must not be None") + + if isinstance(evm_address, str): + try: + evm_address = EvmAddress.from_string(evm_address) + except Exception as e: + raise ValueError(f"Invalid EVM address string: {evm_address}") from e + + elif not isinstance(evm_address, EvmAddress): + raise TypeError( + f"evm_address must be a str or EvmAddress, got {type(evm_address).__name__}" + ) + + return cls( + shard=shard, + realm=realm, + num=0, # numeric account ID unknown at creation time + alias_key=None, + evm_address=evm_address, + ) + + @classmethod + def from_bytes(cls, data: bytes) -> "AccountId": + """ + Deserialize an AccountId from protobuf-encoded bytes. + + Args: + data (bytes): Protobuf bytes + + Returns: + AccountId: An instance of AccountId + """ + return cls._from_proto(basic_types_pb2.AccountID.FromString(data)) + @classmethod def _from_proto(cls, account_id_proto: basic_types_pb2.AccountID) -> "AccountId": """ @@ -101,8 +202,13 @@ def _from_proto(cls, account_id_proto: basic_types_pb2.AccountID) -> "AccountId" num=account_id_proto.accountNum, ) if account_id_proto.alias: - alias = account_id_proto.alias[2:] # remove 0x prefix - result.alias_key = PublicKey.from_bytes(alias) + alias = account_id_proto.alias + if len(alias) == 20: + result.evm_address = EvmAddress.from_bytes(alias) + else: + alias = alias[2:] # remove 2 bytes, i.e prefix + result.alias_key = PublicKey.from_bytes(alias) + return result def _to_proto(self) -> basic_types_pb2.AccountID: @@ -121,6 +227,8 @@ def _to_proto(self) -> basic_types_pb2.AccountID: if self.alias_key: key = self.alias_key._to_proto().SerializeToString() account_id_proto.alias = key + elif self.evm_address: + account_id_proto.alias = 
self.evm_address.address_bytes return account_id_proto @@ -131,8 +239,10 @@ def checksum(self) -> str | None: def validate_checksum(self, client: "Client") -> None: """Validate the checksum for the accountId""" - if self.alias_key is not None: - raise ValueError("Cannot calculate checksum with an account ID that has a aliasKey") + if self.alias_key is not None or self.evm_address is not None: + raise ValueError( + "Cannot calculate checksum with an account ID that has a aliasKey or evmAddress" + ) validate_checksum( self.shard, @@ -142,29 +252,136 @@ def validate_checksum(self, client: "Client") -> None: client, ) + @staticmethod + def _is_evm_address(value: str) -> bool: + """Check if the given string value is an evm_address""" + if value.startswith("0x"): + value = value[2:] + + if len(value) != 40: + return False + + try: + bytes.fromhex(value) + except ValueError: + return False + + return True + def __str__(self) -> str: """ Returns the string representation of the AccountId in 'shard.realm.num' format. """ if self.alias_key: return f"{self.shard}.{self.realm}.{self.alias_key.to_string()}" + if self.evm_address: + return f"{self.shard}.{self.realm}.{self.evm_address.to_string()}" return f"{self.shard}.{self.realm}.{self.num}" def to_string_with_checksum(self, client: "Client") -> str: """ - Returns the string representation of the AccountId with checksum + Returns the string representation of the AccountId with checksum in 'shard.realm.num-checksum' format. 
""" - if self.alias_key is not None: - raise ValueError("Cannot calculate checksum with an account ID that has a aliasKey") + if self.alias_key is not None or self.evm_address is not None: + raise ValueError( + "Cannot calculate checksum with an account ID that has a aliasKey or evmAddress" + ) - return format_to_string_with_checksum( - self.shard, - self.realm, - self.num, - client + return format_to_string_with_checksum(self.shard, self.realm, self.num, client) + + def populate_account_num(self, client: "Client") -> "AccountId": + """ + Populate the numeric account ID using the Mirror Node. + Intended for AccountIds created from EVM addresses. + + Args: + client (Client): Client configured with a mirror network. + + Returns: + AccountId: New instance with the resolved account num. + + Raises: + ValueError: If no EVM address is present or the response is invalid. + RuntimeError: If the mirror node request fails. + """ + if not self.evm_address: + raise ValueError("Account evm_address is required before populating num") + + url = f"{client.network.get_mirror_rest_url()}/accounts/{self.evm_address.to_string()}" + + try: + data = perform_query_to_mirror_node(url) + + account_id = data.get("account") + if not account_id: + raise ValueError("Mirror node response missing 'account'") + + except RuntimeError as e: + raise RuntimeError( + "Failed to populate account number from mirror node for evm_address " + f"{self.evm_address.to_string()}" + ) from e + + try: + num = int(account_id.split(".")[-1]) + return AccountId( + shard=self.shard, + realm=self.realm, + num=num, + evm_address=self.evm_address, + ) + except (ValueError, AttributeError) as e: + raise ValueError(f"Invalid account format received: {account_id}") from e + + def populate_evm_address(self, client: "Client") -> "AccountId": + """ + Populate the EVM address using the Mirror Node. + + This method requires the AccountId to contain a num. + + Args: + client (Client): Client configured with a mirror network. 
+ + Returns: + AccountId: New instance with the resolved account num. + + Raises: + ValueError: If no Account num is present or the response is invalid. + RuntimeError: If the mirror node request fails. + """ + if self.num is None or self.num == 0: + raise ValueError("Account number is required before populating evm_address") + + url = f"{client.network.get_mirror_rest_url()}/accounts/{self.num}" + try: + data = perform_query_to_mirror_node(url) + + evm_addr = data.get("evm_address") + if not evm_addr: + raise ValueError("Mirror node response missing 'evm_address'") + + except RuntimeError as e: + raise RuntimeError( + f"Failed to populate evm_address from mirror node for account {self.num}" + ) from e + + evm_address = EvmAddress.from_string(evm_addr) + return AccountId( + shard=self.shard, realm=self.realm, num=self.num, evm_address=evm_address ) + def to_evm_address(self) -> str: + """Return the EVM-compatible address for this account. Using account num""" + if self.evm_address: + return self.evm_address.to_string() + + return to_solidity_address(self.shard, self.realm, self.num) + + def to_bytes(self) -> bytes: + """Serialize this AccountId to protobuf bytes.""" + return self._to_proto().SerializeToString() + def __repr__(self) -> str: """ Returns the repr representation of the AccountId. 
@@ -174,6 +391,11 @@ def __repr__(self) -> str: f"AccountId(shard={self.shard}, realm={self.realm}, " f"alias_key={self.alias_key.to_string_raw()})" ) + if self.evm_address: + return ( + f"AccountId(shard={self.shard}, realm={self.realm}, " + f"evm_address={self.evm_address.to_string()})" + ) return f"AccountId(shard={self.shard}, realm={self.realm}, num={self.num})" def __eq__(self, other: object) -> bool: @@ -186,13 +408,14 @@ def __eq__(self, other: object) -> bool: """ if not isinstance(other, AccountId): return False - return (self.shard, self.realm, self.num, self.alias_key) == ( + return (self.shard, self.realm, self.num, self.alias_key, self.evm_address) == ( other.shard, other.realm, other.num, other.alias_key, + other.evm_address, ) def __hash__(self) -> int: """Returns a hash value for the AccountId instance.""" - return hash((self.shard, self.realm, self.num, self.alias_key)) + return hash((self.shard, self.realm, self.num)) diff --git a/src/hiero_sdk_python/client/client.py b/src/hiero_sdk_python/client/client.py index 93c3ddda8..928c2e475 100644 --- a/src/hiero_sdk_python/client/client.py +++ b/src/hiero_sdk_python/client/client.py @@ -3,8 +3,10 @@ """ from decimal import Decimal +import math import os from typing import NamedTuple, List, Union, Optional, Literal +import warnings from dotenv import load_dotenv import grpc @@ -22,17 +24,26 @@ DEFAULT_MAX_QUERY_PAYMENT = Hbar(1) +DEFAULT_GRPC_DEADLINE = 10 # seconds +DEFAULT_REQUEST_TIMEOUT = 120 # seconds +DEFAULT_MAX_BACKOFF = 8 # seconds +DEFAULT_MIN_BACKOFF = 0.25 # seconds + NetworkName = Literal["mainnet", "testnet", "previewnet"] + class Operator(NamedTuple): """A named tuple for the operator's account ID and private key.""" + account_id: AccountId private_key: PrivateKey + class Client: """ Client to interact with Hedera network services including mirror nodes and transactions. 
""" + def __init__(self, network: Network = None) -> None: """ Initializes the Client with a given network configuration. @@ -43,6 +54,7 @@ def __init__(self, network: Network = None) -> None: if network is None: network = Network() + self.network: Network = network self.mirror_channel: grpc.Channel = None @@ -51,6 +63,12 @@ def __init__(self, network: Network = None) -> None: self.max_attempts: int = 10 self.default_max_query_payment: Hbar = DEFAULT_MAX_QUERY_PAYMENT + self._min_backoff: float = DEFAULT_MIN_BACKOFF + self._max_backoff: float = DEFAULT_MAX_BACKOFF + + self._grpc_deadline: float = DEFAULT_GRPC_DEADLINE + self._request_timeout: float = DEFAULT_REQUEST_TIMEOUT + self._init_mirror_stub() self.logger: Logger = Logger(LogLevel.from_env(), "hiero_sdk_python") @@ -63,7 +81,7 @@ def from_env(cls, network: Optional[NetworkName] = None) -> "Client": Args: network (str, optional): Override the network ("testnet", "mainnet", "previewnet"). - If not provided, checks 'NETWORK' env var. + If not provided, checks 'NETWORK' env var. Defaults to 'testnet' if neither is set. 
Raises: @@ -74,14 +92,14 @@ def from_env(cls, network: Optional[NetworkName] = None) -> "Client": client = Client.from_env() """ load_dotenv() - + if network: network_name = network else: - network_name = os.getenv('NETWORK') or 'testnet' + network_name = os.getenv("NETWORK") or "testnet" network_name = network_name.lower() - + try: client = cls(Network(network_name)) except ValueError: @@ -91,9 +109,13 @@ def from_env(cls, network: Optional[NetworkName] = None) -> "Client": operator_key_str = os.getenv("OPERATOR_KEY") if not operator_id_str: - raise ValueError("OPERATOR_ID environment variable is required for Client.from_env()") + raise ValueError( + "OPERATOR_ID environment variable is required for Client.from_env()" + ) if not operator_key_str: - raise ValueError("OPERATOR_KEY environment variable is required for Client.from_env()") + raise ValueError( + "OPERATOR_KEY environment variable is required for Client.from_env()" + ) operator_id = AccountId.from_string(operator_id_str) operator_key = PrivateKey.from_string(operator_key_str) @@ -106,7 +128,7 @@ def from_env(cls, network: Optional[NetworkName] = None) -> "Client": def for_testnet(cls) -> "Client": """ Create a Client configured for Hedera Testnet. - + Note: Operator must be set manually using set_operator(). Returns: @@ -118,7 +140,7 @@ def for_testnet(cls) -> "Client": def for_mainnet(cls) -> "Client": """ Create a Client configured for Hedera Mainnet. - + Note: Operator must be set manually using set_operator(). Returns: @@ -130,7 +152,7 @@ def for_mainnet(cls) -> "Client": def for_previewnet(cls) -> "Client": """ Create a Client configured for Hedera Previewnet. - + Note: Operator must be set manually using set_operator(). Returns: @@ -145,11 +167,15 @@ def _init_mirror_stub(self) -> None: for a configurable mirror address, which should use port 443 for HTTPS connections. 
""" mirror_address = self.network.get_mirror_address() - if mirror_address.endswith(':50212') or mirror_address.endswith(':443'): - self.mirror_channel = grpc.secure_channel(mirror_address, grpc.ssl_channel_credentials()) + if mirror_address.endswith(":50212") or mirror_address.endswith(":443"): + self.mirror_channel = grpc.secure_channel( + mirror_address, grpc.ssl_channel_credentials() + ) else: self.mirror_channel = grpc.insecure_channel(mirror_address) - self.mirror_stub = mirror_consensus_grpc.ConsensusServiceStub(self.mirror_channel) + self.mirror_stub = mirror_consensus_grpc.ConsensusServiceStub( + self.mirror_channel + ) def set_operator(self, account_id: AccountId, private_key: PrivateKey) -> None: """ @@ -159,14 +185,15 @@ def set_operator(self, account_id: AccountId, private_key: PrivateKey) -> None: self.operator_private_key = private_key @property - def operator(self) -> Union[Operator,None]: + def operator(self) -> Union[Operator, None]: """ Returns an Operator namedtuple if both account ID and private key are set, otherwise None. """ if self.operator_account_id and self.operator_private_key: return Operator( - account_id=self.operator_account_id, private_key=self.operator_private_key + account_id=self.operator_account_id, + private_key=self.operator_private_key, ) return None @@ -175,7 +202,9 @@ def generate_transaction_id(self) -> TransactionId: Generates a new transaction ID, requiring that the operator_account_id is set. """ if self.operator_account_id is None: - raise ValueError("Operator account ID must be set to generate transaction ID.") + raise ValueError( + "Operator account ID must be set to generate transaction ID." + ) return TransactionId.generate(self.operator_account_id) def get_node_account_ids(self) -> List[AccountId]: @@ -183,7 +212,9 @@ def get_node_account_ids(self) -> List[AccountId]: Returns a list of node AccountIds that the client can use to send queries and transactions. 
""" if self.network and self.network.nodes: - return [node._account_id for node in self.network.nodes] # pylint: disable=W0212 + return [ + node._account_id for node in self.network.nodes + ] # pylint: disable=W0212 raise ValueError("No nodes available in the network configuration.") def close(self) -> None: @@ -201,7 +232,7 @@ def close(self) -> None: def set_transport_security(self, enabled: bool) -> "Client": """ Enable or disable TLS for consensus node connections. - + Note: TLS is enabled by default for hosted networks (mainnet, testnet, previewnet). For local networks (solo, localhost) and custom networks, TLS is disabled by default. @@ -219,7 +250,7 @@ def is_transport_security(self) -> bool: def set_verify_certificates(self, verify: bool) -> "Client": """ Enable or disable verification of server certificates when TLS is enabled. - + Note: Certificate verification is enabled by default for all networks. Use this method to disable verification (e.g., for testing with self-signed certificates). @@ -245,8 +276,10 @@ def get_tls_root_certificates(self) -> Optional[bytes]: Retrieve the configured root certificates for TLS connections. """ return self.network.get_tls_root_certificates() - - def set_default_max_query_payment(self, max_query_payment: Union[int, float, Decimal, Hbar]) -> "Client": + + def set_default_max_query_payment( + self, max_query_payment: Union[int, float, Decimal, Hbar] + ) -> "Client": """ Sets the default maximum Hbar amount allowed for any query executed by this client. @@ -259,14 +292,16 @@ def set_default_max_query_payment(self, max_query_payment: Union[int, float, Dec Returns: Client: The current client instance for method chaining. 
""" - if isinstance(max_query_payment, bool) or not isinstance(max_query_payment, (int, float, Decimal, Hbar)): + if isinstance(max_query_payment, bool) or not isinstance( + max_query_payment, (int, float, Decimal, Hbar) + ): raise TypeError( "max_query_payment must be int, float, Decimal, or Hbar, " f"got {type(max_query_payment).__name__}" ) - + value = ( - max_query_payment + max_query_payment if isinstance(max_query_payment, Hbar) else Hbar(max_query_payment) ) @@ -277,6 +312,156 @@ def set_default_max_query_payment(self, max_query_payment: Union[int, float, Dec self.default_max_query_payment = value return self + def set_max_attempts(self, max_attempts: int) -> "Client": + """ + Set the maximum number of execution attempts for all transactions and queries + executed by this client. + + Args: + max_attempts (int): Maximum number of attempts. Must be a positive integer. + + Returns: + Client: This client instance for fluent chaining. + """ + if isinstance(max_attempts, bool) or not isinstance(max_attempts, int): + raise TypeError( + f"max_attempts must be of type int, got {(type(max_attempts).__name__)}" + ) + + if max_attempts <= 0: + raise ValueError("max_attempts must be greater than 0") + + self.max_attempts = max_attempts + return self + + def set_grpc_deadline(self, grpc_deadline: Union[int, float]) -> "Client": + """ + Set the gRPC deadline (per-request timeout) used for all network calls + made by this client. + + The deadline represents the maximum time (in seconds) allowed for an + individual gRPC request to complete before it is cancelled by the client. + + Args: + grpc_deadline (Union[int, float]): gRPC deadline in seconds. + Must be greater than zero. + + Returns: + Client: This client instance for fluent chaining. 
+ """ + if isinstance(grpc_deadline, bool) or not isinstance( + grpc_deadline, (float, int) + ): + raise TypeError( + f"grpc_deadline must be of type Union[int, float], got {type(grpc_deadline).__name__}" + ) + + if not math.isfinite(grpc_deadline) or grpc_deadline <= 0: + raise ValueError("grpc_deadline must be a finite value greater than 0") + + if grpc_deadline > self._request_timeout: + warnings.warn( + "grpc_deadline should be smaller than request_timeout. " + "This configuration may cause operations to fail unexpectedly.", + UserWarning, + ) + + self._grpc_deadline = float(grpc_deadline) + return self + + def set_request_timeout(self, request_timeout: Union[int, float]) -> "Client": + """ + Set the total execution timeout for a single transaction or query + made by this client. + + This timeout represents the maximum wall-clock time (in seconds) allowed + for the entire execution lifecycle, including retries and backoff delays. + Once exceeded, the request fails with a TimeoutError. + + Args: + request_timeout (Union[int, float]): Total execution timeout in seconds. + Must be greater than zero. + + Returns: + Client: This client instance for fluent chaining. + """ + if isinstance(request_timeout, bool) or not isinstance( + request_timeout, (float, int) + ): + raise TypeError( + f"request_timeout must be of type Union[int, float], got {type(request_timeout).__name__}" + ) + + if not math.isfinite(request_timeout) or request_timeout <= 0: + raise ValueError("request_timeout must be a finite value greater than 0") + + if request_timeout < self._grpc_deadline: + warnings.warn( + "request_timeout should be larger than grpc_deadline. " + "This configuration may cause operations to fail unexpectedly.", + UserWarning, + ) + + self._request_timeout = float(request_timeout) + return self + + def set_min_backoff(self, min_backoff: Union[int, float]) -> "Client": + """ + Set the minimum backoff delay used between retry attempts. 
+ + Args: + min_backoff (Union[int, float]): Minimum backoff delay in seconds. + Must be finite and non-negative. + + Returns: + Client: This client instance for fluent chaining. + """ + if isinstance(min_backoff, bool) or not isinstance(min_backoff, (int, float)): + raise TypeError( + f"min_backoff must be of type int or float, got {(type(min_backoff).__name__)}" + ) + + if not math.isfinite(min_backoff) or min_backoff < 0: + raise ValueError("min_backoff must be a finite value >= 0") + + if self._max_backoff is not None and min_backoff > self._max_backoff: + raise ValueError("min_backoff cannot exceed max_backoff") + + self._min_backoff = float(min_backoff) + return self + + def set_max_backoff(self, max_backoff: Union[int, float]) -> "Client": + """ + Set the maximum backoff delay used between retry attempts. + + Args: + max_backoff (Union[int, float]): Maximum backoff delay in seconds. + Must be finite and greater than or equal to min_backoff. + + Returns: + Client: This client instance for fluent chaining. + """ + if isinstance(max_backoff, bool) or not isinstance(max_backoff, (int, float)): + raise TypeError( + f"max_backoff must be of type int or float, got {(type(max_backoff).__name__)}" + ) + + if not math.isfinite(max_backoff) or max_backoff < 0: + raise ValueError("max_backoff must be a finite value >= 0") + + if self._min_backoff is not None and max_backoff < self._min_backoff: + raise ValueError("max_backoff cannot be less than min_backoff") + + self._max_backoff = float(max_backoff) + return self + + def update_network(self) -> "Client": + """ + Refresh the network node list from the mirror node. + """ + self.network._set_network_nodes() + return self + def __enter__(self) -> "Client": """ Allows the Client to be used in a 'with' statement for automatic resource management. @@ -288,4 +473,4 @@ def __exit__(self, exc_type, exc_value, traceback) -> None: """ Automatically close channels when exiting 'with' block. 
""" - self.close() \ No newline at end of file + self.close() diff --git a/src/hiero_sdk_python/client/network.py b/src/hiero_sdk_python/client/network.py index e77295fbf..406a8cc5e 100644 --- a/src/hiero_sdk_python/client/network.py +++ b/src/hiero_sdk_python/client/network.py @@ -1,5 +1,7 @@ """Network module for managing Hedera SDK connections.""" + import secrets +import time from typing import Dict, List, Optional, Any, Tuple import requests @@ -9,30 +11,29 @@ from hiero_sdk_python.node import _Node - class Network: """ Manages the network configuration for connecting to the Hedera network. """ # Mirror node gRPC addresses (always use TLS, port 443 for HTTPS) - MIRROR_ADDRESS_DEFAULT: Dict[str,str] = { - 'mainnet': 'mainnet.mirrornode.hedera.com:443', - 'testnet': 'testnet.mirrornode.hedera.com:443', - 'previewnet': 'previewnet.mirrornode.hedera.com:443', - 'solo': 'localhost:5600' # Local development only + MIRROR_ADDRESS_DEFAULT: Dict[str, str] = { + "mainnet": "mainnet.mirrornode.hedera.com:443", + "testnet": "testnet.mirrornode.hedera.com:443", + "previewnet": "previewnet.mirrornode.hedera.com:443", + "solo": "localhost:5600", # Local development only } # Mirror node REST API base URLs (HTTPS for production networks, HTTP for localhost) - MIRROR_NODE_URLS: Dict[str,str] = { - 'mainnet': 'https://mainnet-public.mirrornode.hedera.com', - 'testnet': 'https://testnet.mirrornode.hedera.com', - 'previewnet': 'https://previewnet.mirrornode.hedera.com', - 'solo': 'http://localhost:8080' # Local development only + MIRROR_NODE_URLS: Dict[str, str] = { + "mainnet": "https://mainnet-public.mirrornode.hedera.com", + "testnet": "https://testnet.mirrornode.hedera.com", + "previewnet": "https://previewnet.mirrornode.hedera.com", + "solo": "http://localhost:5551", # Local development only } - DEFAULT_NODES: Dict[str,List[_Node]] = { - 'mainnet': [ + DEFAULT_NODES: Dict[str, List[_Node]] = { + "mainnet": [ ("35.237.200.180:50211", AccountId(0, 0, 3)), 
("35.186.191.247:50211", AccountId(0, 0, 4)), ("35.192.2.25:50211", AccountId(0, 0, 5)), @@ -46,42 +47,36 @@ class Network: ("35.234.132.107:50211", AccountId(0, 0, 13)), ("35.236.2.27:50211", AccountId(0, 0, 14)), ], - 'testnet': [ + "testnet": [ ("0.testnet.hedera.com:50211", AccountId(0, 0, 3)), ("1.testnet.hedera.com:50211", AccountId(0, 0, 4)), ("2.testnet.hedera.com:50211", AccountId(0, 0, 5)), ("3.testnet.hedera.com:50211", AccountId(0, 0, 6)), ], - 'previewnet': [ + "previewnet": [ ("0.previewnet.hedera.com:50211", AccountId(0, 0, 3)), ("1.previewnet.hedera.com:50211", AccountId(0, 0, 4)), ("2.previewnet.hedera.com:50211", AccountId(0, 0, 5)), ("3.previewnet.hedera.com:50211", AccountId(0, 0, 6)), ], - 'solo': [ - ("localhost:50211", AccountId(0, 0, 3)) - ], - 'localhost': [ - ("localhost:50211", AccountId(0, 0, 3)) - ], - 'local': [ - ("localhost:50211", AccountId(0, 0, 3)) - ], + "solo": [("localhost:50211", AccountId(0, 0, 3))], + "localhost": [("localhost:50211", AccountId(0, 0, 3))], + "local": [("localhost:50211", AccountId(0, 0, 3))], } LEDGER_ID: Dict[str, bytes] = { - 'mainnet': bytes.fromhex('00'), - 'testnet': bytes.fromhex('01'), - 'previewnet': bytes.fromhex('02'), - 'solo': bytes.fromhex('03') + "mainnet": bytes.fromhex("00"), + "testnet": bytes.fromhex("01"), + "previewnet": bytes.fromhex("02"), + "solo": bytes.fromhex("03"), } def __init__( self, - network: str = 'testnet', + network: str = "testnet", nodes: Optional[List[_Node]] = None, mirror_address: Optional[str] = None, - ledger_id: bytes | None = None + ledger_id: bytes | None = None, ) -> None: """ Initializes the Network with the specified network name or custom config. @@ -89,54 +84,87 @@ def __init__( Args: network (str): One of 'mainnet', 'testnet', 'previewnet', 'solo', or a custom name if you prefer. - nodes (list, optional): A list of (node_address, AccountId) pairs. + nodes (list, optional): A list of (node_address, AccountId) pairs. 
If provided, we skip fetching from the mirror. mirror_address (str, optional): A mirror node address (host:port) for topic queries. If not provided, we'll use a default from MIRROR_ADDRESS_DEFAULT[network]. - + Note: TLS is enabled by default for hosted networks (mainnet, testnet, previewnet). For local networks (solo, localhost) and custom networks, TLS is disabled by default. Certificate verification is enabled by default for all networks. Use Client.set_transport_security() and Client.set_verify_certificates() to customize. """ - self.network: str = network or 'testnet' + self.network: str = network or "testnet" self.mirror_address: str = mirror_address or self.MIRROR_ADDRESS_DEFAULT.get( - network, 'localhost:5600' + network, "localhost:5600" ) - self.ledger_id = ledger_id or self.LEDGER_ID.get(network, bytes.fromhex('03')) - + self.ledger_id = ledger_id or self.LEDGER_ID.get(network, bytes.fromhex("03")) + # Default TLS configuration: enabled for hosted networks, disabled for local/custom - hosted_networks = ('mainnet', 'testnet', 'previewnet') + hosted_networks = ("mainnet", "testnet", "previewnet") self._transport_security: bool = self.network in hosted_networks self._verify_certificates: bool = True # Always enabled by default self._root_certificates: Optional[bytes] = None - if nodes is not None: - final_nodes = nodes - elif self.network in ('solo', 'localhost', 'local'): - final_nodes = self._fetch_nodes_from_default_nodes() - else: - fetched = self._fetch_nodes_from_mirror_node() - if not fetched and self.network in self.DEFAULT_NODES: - final_nodes = self._fetch_nodes_from_default_nodes() - elif fetched: - final_nodes = fetched - else: - raise ValueError(f"No default nodes for network='{self.network}'") - - self.nodes: List[_Node] = final_nodes - + self.nodes: List[_Node] = [] + self._healthy_nodes: List[_Node] = [] + + self._set_network_nodes(nodes) + + self._node_min_readmit_period = 8 # seconds + self._node_max_readmit_period = 3600 # seconds + 
self._earliest_readmit_time = time.monotonic() + self._node_min_readmit_period + + if not self._healthy_nodes: + raise ValueError("No healthy nodes available to initialize network") + + self._node_index: int = secrets.randbelow(len(self._healthy_nodes)) + self.current_node: _Node = self._healthy_nodes[self._node_index] + + def _set_network_nodes(self, nodes: Optional[List[_Node]] = None): + """ + Configure the consensus nodes used by this network. + """ + final_nodes = self._resolve_nodes(nodes) + # Apply TLS configuration to all nodes + for node in final_nodes: + node._apply_transport_security( + self._transport_security + ) # pylint: disable=protected-access + node._set_verify_certificates( + self._verify_certificates + ) # pylint: disable=protected-access + node._set_root_certificates( + self._root_certificates + ) # pylint: disable=protected-access + + self.nodes = final_nodes + self._healthy_nodes = [] + for node in self.nodes: - node._apply_transport_security(self._transport_security) # pylint: disable=protected-access - node._set_verify_certificates(self._verify_certificates) # pylint: disable=protected-access - node._set_root_certificates(self._root_certificates) # pylint: disable=protected-access + if not node.is_healthy(): + continue + self._healthy_nodes.append(node) - self._node_index: int = secrets.randbelow(len(self.nodes)) - self.current_node: _Node = self.nodes[self._node_index] + def _resolve_nodes(self, nodes: Optional[List[_Node]]) -> List[_Node]: + if nodes: + return nodes + + if self.network in ("solo", "localhost", "local"): + return self._fetch_nodes_from_default_nodes() + + fetched = self._fetch_nodes_from_mirror_node() + if fetched: + return fetched + + if self.network in self.DEFAULT_NODES: + return self._fetch_nodes_from_default_nodes() + + raise ValueError(f"No nodes available for network='{self.network}'") def _fetch_nodes_from_mirror_node(self) -> List[_Node]: """ @@ -146,19 +174,23 @@ def _fetch_nodes_from_mirror_node(self) -> 
List[_Node]: """ base_url: Optional[str] = self.MIRROR_NODE_URLS.get(self.network) if not base_url: - print(f"No known mirror node URL for network='{self.network}'. Skipping fetch.") + print( + f"No known mirror node URL for network='{self.network}'. Skipping fetch." + ) return [] url: str = f"{base_url}/api/v1/network/nodes?limit=100&order=desc" try: - response: requests.Response = requests.get(url, timeout=30) # Add 30 second timeout + response: requests.Response = requests.get( + url, timeout=30 + ) # Add 30 second timeout response.raise_for_status() data: Dict[str, Any] = response.json() nodes: List[_Node] = [] # Process each node from the mirror node API response - for node in data.get('nodes', []): + for node in data.get("nodes", []): address_book: NodeAddress = NodeAddress._from_dict(node) account_id: AccountId = address_book._account_id address: str = str(address_book._addresses[0]) @@ -182,22 +214,27 @@ def _fetch_nodes_from_default_nodes(self) -> List[_Node]: def _select_node(self) -> _Node: """ Select the next node in the collection of available nodes using round-robin selection. - + This method increments the internal node index, wrapping around when reaching the end of the node list, and updates the current_node reference. - + Raises: ValueError: If no nodes are available for selection. - + Returns: _Node: The selected node instance. """ - if not self.nodes: - raise ValueError("No nodes available to select.") - self._node_index = (self._node_index + 1) % len(self.nodes) - self.current_node = self.nodes[self._node_index] + self._readmit_nodes() + + if not self._healthy_nodes: + raise ValueError("No healthy node available to select") + + self._node_index %= len(self._healthy_nodes) + self._node_index = (self._node_index + 1) % len(self._healthy_nodes) + + self.current_node = self._healthy_nodes[self._node_index] return self.current_node - + def _get_node(self, account_id: AccountId) -> Optional[_Node]: """ Get a node matching the given account ID. 
@@ -208,6 +245,7 @@ def _get_node(self, account_id: AccountId) -> Optional[_Node]: Returns: Optional[_Node]: The matching node, or None if not found. """ + self._readmit_nodes() for node in self.nodes: if node._account_id == account_id: return node @@ -219,17 +257,17 @@ def get_mirror_address(self) -> str: Mirror nodes always use TLS, so addresses should use port 443 for HTTPS. """ return self.mirror_address - + def _parse_mirror_address(self) -> Tuple[str, int]: """ Parse mirror_address into host and port. - + Returns: Tuple[str, int]: (host, port) tuple """ mirror_addr = self.mirror_address - if ':' in mirror_addr: - host, port_str = mirror_addr.rsplit(':', 1) + if ":" in mirror_addr: + host, port_str = mirror_addr.rsplit(":", 1) try: port = int(port_str) except ValueError: @@ -238,49 +276,51 @@ def _parse_mirror_address(self) -> Tuple[str, int]: host = mirror_addr port = 443 return (host, port) - + def _determine_scheme_and_port(self, host: str, port: int) -> Tuple[str, int]: """ Determine the scheme (http/https) and port for the REST URL. - + Args: host: The hostname port: The port number - + Returns: Tuple[str, int]: (scheme, port) tuple """ - is_localhost = host in ('localhost', '127.0.0.1') - + is_localhost = host in ("localhost", "127.0.0.1") + if is_localhost: - scheme = 'http' + scheme = "http" if port == 443: port = 8080 # Default REST port for localhost else: - scheme = 'https' + scheme = "https" if port == 5600: # gRPC port, use 443 for REST port = 443 - + return (scheme, port) - + def _build_rest_url(self, scheme: str, host: str, port: int) -> str: """ Build the final REST URL with optional port. 
- + Args: scheme: URL scheme (http or https) host: Hostname port: Port number - + Returns: str: Complete REST URL with /api/v1 suffix """ - is_default_port = (scheme == 'https' and port == 443) or (scheme == 'http' and port == 80) - + is_default_port = (scheme == "https" and port == 443) or ( + scheme == "http" and port == 80 + ) + if is_default_port: return f"{scheme}://{host}/api/v1" return f"{scheme}://{host}:{port}/api/v1" - + def get_mirror_rest_url(self) -> str: """ Get the REST API base URL for the mirror node. @@ -291,7 +331,7 @@ def get_mirror_rest_url(self) -> str: if base_url: # MIRROR_NODE_URLS contains base URLs, append /api/v1 return f"{base_url}/api/v1" - + # Fallback: construct from mirror_address host, port = self._parse_mirror_address() scheme, port = self._determine_scheme_and_port(host, port) @@ -329,7 +369,9 @@ def set_tls_root_certificates(self, root_certificates: Optional[bytes]) -> None: """ self._root_certificates = root_certificates for node in self.nodes: - node._set_root_certificates(root_certificates) # pylint: disable=protected-access + node._set_root_certificates( + root_certificates + ) # pylint: disable=protected-access def get_tls_root_certificates(self) -> Optional[bytes]: """ @@ -342,3 +384,64 @@ def is_verify_certificates(self) -> bool: Determine if certificate verification is enabled. """ return self._verify_certificates + + def _readmit_nodes(self) -> None: + """ + Re-admit nodes whose backoff period has expired. 
+ """ + now = time.monotonic() + + if self._earliest_readmit_time > now: + return + + next_readmit = float("inf") + + for node in self.nodes: + if node in self._healthy_nodes: + continue + + if node._readmit_time > now: + next_readmit = min(next_readmit, node._readmit_time) + continue + + self._mark_node_healthy(node) + + delay = min( + self._node_max_readmit_period, + max(self._node_min_readmit_period, next_readmit - now), + ) + + self._earliest_readmit_time = now + delay + + def _increase_backoff(self, node: _Node) -> None: + """ + Increase the node's backoff duration after a failure and remove node from healthy node. + """ + if not isinstance(node, _Node): + raise TypeError("node must be of type _Node") + + node._increase_backoff() + self._mark_node_unhealthy(node) + + def _decrease_backoff(self, node: _Node) -> None: + """ + Decrease the node's backoff duration after a successful operation. + """ + if not isinstance(node, _Node): + raise TypeError("node must be of type _Node") + + node._decrease_backoff() + + def _mark_node_unhealthy(self, node: _Node) -> None: + if not isinstance(node, _Node): + raise TypeError("node must be of type _Node") + + if node in self._healthy_nodes: + self._healthy_nodes.remove(node) + + def _mark_node_healthy(self, node: _Node) -> None: + if not isinstance(node, _Node): + raise TypeError("node must be of type _Node") + + if node not in self._healthy_nodes: + self._healthy_nodes.append(node) diff --git a/src/hiero_sdk_python/consensus/topic_id.py b/src/hiero_sdk_python/consensus/topic_id.py index 502728f79..dde808ec3 100644 --- a/src/hiero_sdk_python/consensus/topic_id.py +++ b/src/hiero_sdk_python/consensus/topic_id.py @@ -75,6 +75,15 @@ def __str__(self) -> str: """ return f"{self.shard}.{self.realm}.{self.num}" + def __repr__(self) -> str: + """ + Returns a detailed representation of the TopicId suitable for debugging. + + Returns: + str: A string in constructor format 'TopicId(shard=X, realm=Y, num=Z)'. 
+ """ + return f"TopicId(shard={self.shard}, realm={self.realm}, num={self.num})" + @classmethod def from_string(cls, topic_id_str: str) -> "TopicId": """ diff --git a/src/hiero_sdk_python/consensus/topic_message_submit_transaction.py b/src/hiero_sdk_python/consensus/topic_message_submit_transaction.py index 4d79518ad..cece1190c 100644 --- a/src/hiero_sdk_python/consensus/topic_message_submit_transaction.py +++ b/src/hiero_sdk_python/consensus/topic_message_submit_transaction.py @@ -1,5 +1,5 @@ import math -from typing import List, Optional +from typing import List, Optional, Union from hiero_sdk_python.client.client import Client from hiero_sdk_python.consensus.topic_id import TopicId from hiero_sdk_python.crypto.private_key import PrivateKey @@ -284,11 +284,23 @@ def freeze_with(self, client: "Client") -> "TopicMessageSubmitTransaction": return super().freeze_with(client) - def execute(self, client: "Client"): + def execute(self, client: "Client", timeout: Optional[Union[int, float]] = None): + """ + Executes the topic message submit transaction. + + For multi-chunk transactions, this method will execute all chunks sequentially. + + Args: + client: The client to execute the transaction with. + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. + + Returns: + TransactionReceipt: The receipt from the first chunk execution. 
+ """ self._validate_chunking() if self.get_required_chunks() == 1: - return super().execute(client) + return super().execute(client, timeout) # Multi-chunk transaction - execute all chunks responses = [] @@ -308,7 +320,7 @@ def execute(self, client: "Client"): super().sign(signing_key) # Execute the chunk - response = super().execute(client) + response = super().execute(client, timeout) responses.append(response) # Return the first response as the JS SDK does diff --git a/src/hiero_sdk_python/contract/contract_call_query.py b/src/hiero_sdk_python/contract/contract_call_query.py index dba493b1a..e492ca743 100644 --- a/src/hiero_sdk_python/contract/contract_call_query.py +++ b/src/hiero_sdk_python/contract/contract_call_query.py @@ -5,7 +5,7 @@ """ import traceback -from typing import Optional +from typing import Optional, Union from hiero_sdk_python.account.account_id import AccountId from hiero_sdk_python.channels import _Channel @@ -187,7 +187,7 @@ def _get_method(self, channel: _Channel) -> _Method: query_func=channel.smart_contract.contractCallLocalMethod, ) - def execute(self, client: Client) -> ContractFunctionResult: + def execute(self, client: Client, timeout: Optional[Union[int, float]] = None) -> ContractFunctionResult: """ Executes the contract call query. @@ -199,6 +199,7 @@ def execute(self, client: Client) -> ContractFunctionResult: Args: client (Client): The client instance to use for execution + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. 
Returns: ContractFunctionResult: The result of the contract call @@ -209,7 +210,7 @@ def execute(self, client: Client) -> ContractFunctionResult: ReceiptStatusError: If the query fails with a receipt status error """ self._before_execute(client) - response = self._execute(client) + response = self._execute(client, timeout) return ContractFunctionResult._from_proto( response.contractCallLocal.functionResult diff --git a/src/hiero_sdk_python/contract/contract_id.py b/src/hiero_sdk_python/contract/contract_id.py index ec585b2c2..97ed95c9b 100644 --- a/src/hiero_sdk_python/contract/contract_id.py +++ b/src/hiero_sdk_python/contract/contract_id.py @@ -9,11 +9,14 @@ from dataclasses import dataclass, field from typing import TYPE_CHECKING, Optional +from hiero_sdk_python.crypto.evm_address import EvmAddress from hiero_sdk_python.hapi.services import basic_types_pb2 from hiero_sdk_python.utils.entity_id_helper import ( parse_from_string, validate_checksum, - format_to_string_with_checksum + format_to_string_with_checksum, + to_solidity_address, + perform_query_to_mirror_node, ) if TYPE_CHECKING: @@ -60,6 +63,13 @@ def _from_proto(cls, contract_id_proto: basic_types_pb2.ContractID) -> "Contract ContractId: A new ContractId instance populated with data from the protobuf object. """ + if contract_id_proto.HasField("evm_address"): + return cls( + shard=contract_id_proto.shardNum, + realm=contract_id_proto.realmNum, + evm_address=contract_id_proto.evm_address, + ) + return cls( shard=contract_id_proto.shardNum, realm=contract_id_proto.realmNum, @@ -97,13 +107,13 @@ def from_string(cls, contract_id_str: str) -> "ContractId": ContractId: A new ContractId instance. Raises: + TypeError: If input is not a string. ValueError: If the contract ID string is None, not a string, or in an invalid format. """ if contract_id_str is None or not isinstance(contract_id_str, str): - raise ValueError( - f"Invalid contract ID string '{contract_id_str}'. 
" - f"Expected format 'shard.realm.contract'." + raise TypeError( + f"contract_id_str must be of type str, got {type(contract_id_str).__name__}" ) evm_address_match = EVM_ADDRESS_REGEX.match(contract_id_str) @@ -113,7 +123,7 @@ def from_string(cls, contract_id_str: str) -> "ContractId": return cls( shard=int(shard), realm=int(realm), - evm_address=bytes.fromhex(evm_address) + evm_address=bytes.fromhex(evm_address), ) else: @@ -121,9 +131,7 @@ def from_string(cls, contract_id_str: str) -> "ContractId": shard, realm, contract, checksum = parse_from_string(contract_id_str) contract_id: ContractId = cls( - shard=int(shard), - realm=int(realm), - contract=int(contract) + shard=int(shard), realm=int(realm), contract=int(contract) ) object.__setattr__(contract_id, "checksum", checksum) return contract_id @@ -134,7 +142,120 @@ def from_string(cls, contract_id_str: str) -> "ContractId": f"Expected format 'shard.realm.contract'." ) from e - def __str__(self): + @classmethod + def from_evm_address(cls, shard: int, realm: int, evm_address: str) -> "ContractId": + """ + Create a ContractId from an EVM address string. + + Args: + shard (int): Shard number. + realm (int): Realm number. + evm_address (str): Hex-encoded EVM address. + + Returns: + ContractId: A new ContractId instance. + + Raises: + TypeError: If any argument is of incorrect type. + ValueError: If shard or realm are negative, or the EVM address is invalid. 
+ """ + if not isinstance(evm_address, str): + raise TypeError( + f"evm_address must be of type str, got {type(evm_address).__name__}" + ) + + for name, value in (("shard", shard), ("realm", realm)): + if isinstance(value, bool) or not isinstance(value, int): + raise TypeError(f"{name} must be int, got {type(value).__name__}") + if value < 0: + raise ValueError(f"{name} must be a non-negative integer") + + try: + # throw error internally if not valid evm_address + evm_addr = EvmAddress.from_string(evm_address=evm_address) + return cls(shard=shard, realm=realm, evm_address=evm_addr.address_bytes) + except Exception as e: + raise ValueError(f"Invalid EVM address: {evm_address}") from e + + @classmethod + def from_bytes(cls, data: bytes) -> "ContractId": + """ + Deserialize an ContractId from protobuf-encoded bytes. + + Args: + data (bytes): Protobuf-encoded `ContractID` message. + + Returns: + ContractId: Reconstructed ContractId instance. + + Raises: + TypeError: If data is not bytes. + ValueError: If deserialization fails. + """ + if not isinstance(data, (bytes, bytearray)): + raise TypeError("data must be bytes") + + try: + proto = basic_types_pb2.ContractID.FromString(data) + except Exception as exc: + raise ValueError("Failed to deserialize ContractId from bytes") from exc + + return cls._from_proto(proto) + + def to_bytes(self) -> "bytes": + """ + Serialize this ContractId to protobuf bytes. + + Returns: + bytes: Protobuf-encoded representation of this ContractId. + """ + return self._to_proto().SerializeToString() + + def populate_contract_num(self, client: "Client") -> "ContractId": + """ + Resolve and populate the numeric contract ID using the Mirror Node. + + This method requires the ContractId to contain an EVM address. + + Args: + client (Client): Client configured with a mirror network. + + Returns: + ContractId: New instance with the resolved contract number. + + Raises: + ValueError: If no EVM address is present or the response is invalid. 
+ RuntimeError: If the mirror node request fails. + """ + if self.evm_address is None: + raise ValueError("evm_address is required to populate the contract number") + + url = f"{client.network.get_mirror_rest_url()}/contracts/{self.evm_address.hex()}" + + try: + response = perform_query_to_mirror_node(url) + contract_id = response.get("contract_id") + if not contract_id: + raise ValueError("Mirror node response missing 'contract_id'") + + except RuntimeError as e: + raise RuntimeError( + "Failed to populate contract num from mirror node for evm_address " + f"{self.evm_address.hex()}" + ) from e + + try: + contract = int(contract_id.split(".")[-1]) + return ContractId( + shard=self.shard, + realm=self.realm, + contract=contract, + evm_address=self.evm_address, + ) + except (ValueError, AttributeError) as e: + raise ValueError(f"Invalid contract_id format received: {contract_id}") from e + + def __str__(self) -> str: """ Returns the string representation of the ContractId. @@ -149,6 +270,19 @@ def __str__(self): return f"{self.shard}.{self.realm}.{self.contract}" + def __repr__(self) -> str: + """ + Returns a detailed string representation of the ContractId for debugging. + + Returns: + str: ContractId(shard=X, realm=Y, contract=Z) or + ContractId(shard=X, realm=Y, evm_address=...) if evm_address is set. + """ + if self.evm_address is not None: + return f"ContractId(shard={self.shard}, realm={self.realm}, evm_address={self.evm_address.hex()})" + + return f"ContractId(shard={self.shard}, realm={self.realm}, contract={self.contract})" + def to_evm_address(self) -> str: """ Converts the ContractId to a 20-byte EVM address string (hex). @@ -163,15 +297,7 @@ def to_evm_address(self) -> str: if self.evm_address is not None: return self.evm_address.hex() - # If evm_address is not set, compute the EVM address from shard, realm, and contract. - # The EVM address is a 20-byte value: - # [4 bytes shard][8 bytes realm][8 bytes contract], all big-endian. 
- shard_bytes = (0).to_bytes(4, "big") - realm_bytes = (0).to_bytes(8, "big") - contract_bytes = self.contract.to_bytes(8, "big") - evm_bytes = shard_bytes + realm_bytes + contract_bytes - - return evm_bytes.hex() + return to_solidity_address(self.shard, self.realm, self.contract) def validate_checksum(self, client: "Client") -> None: """ @@ -214,11 +340,11 @@ def to_string_with_checksum(self, client: "Client") -> str: as checksums cannot be applied to EVM addresses. """ if self.evm_address is not None: - raise ValueError("to_string_with_checksum cannot be applied to ContractId with evm_address") + raise ValueError( + "to_string_with_checksum cannot be applied to ContractId with evm_address" + ) return format_to_string_with_checksum( - self.shard, - self.realm, - self.contract, - client - ) \ No newline at end of file + self.shard, self.realm, self.contract, client + ) + diff --git a/src/hiero_sdk_python/contract/contract_info_query.py b/src/hiero_sdk_python/contract/contract_info_query.py index ae6547235..d9b34f69c 100644 --- a/src/hiero_sdk_python/contract/contract_info_query.py +++ b/src/hiero_sdk_python/contract/contract_info_query.py @@ -3,7 +3,7 @@ """ import traceback -from typing import Optional +from typing import Optional, Union from hiero_sdk_python.channels import _Channel from hiero_sdk_python.client.client import Client @@ -98,7 +98,7 @@ def _get_method(self, channel: _Channel) -> _Method: transaction_func=None, query_func=channel.smart_contract.getContractInfo ) - def execute(self, client: Client) -> ContractInfo: + def execute(self, client: Client, timeout: Optional[Union[int, float]] = None) -> ContractInfo: """ Executes the contract info query. @@ -110,6 +110,7 @@ def execute(self, client: Client) -> ContractInfo: Args: client (Client): The client instance to use for execution + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. 
Returns: ContractInfo: The contract info from the network @@ -120,7 +121,7 @@ def execute(self, client: Client) -> ContractInfo: ReceiptStatusError: If the query fails with a receipt status error """ self._before_execute(client) - response = self._execute(client) + response = self._execute(client, timeout) return ContractInfo._from_proto(response.contractGetInfo.contractInfo) diff --git a/src/hiero_sdk_python/executable.py b/src/hiero_sdk_python/executable.py index 0a4e492da..0a81893fa 100644 --- a/src/hiero_sdk_python/executable.py +++ b/src/hiero_sdk_python/executable.py @@ -1,20 +1,25 @@ -from os import error +import math +import re import time -from typing import Callable, Optional, Any, TYPE_CHECKING, List -import grpc +from typing import Callable, Optional, Any, TYPE_CHECKING, List, Union from abc import ABC, abstractmethod from enum import IntEnum +import warnings + +import grpc from hiero_sdk_python.channels import _Channel from hiero_sdk_python.exceptions import MaxAttemptsError from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.hapi.services import query_pb2, transaction_pb2 +from hiero_sdk_python.logger.logger import Logger +from hiero_sdk_python.response_code import ResponseCode + if TYPE_CHECKING: from hiero_sdk_python.client.client import Client -# Default values for retry and backoff configuration in miliseconds -DEFAULT_MAX_BACKOFF: int = 8000 -DEFAULT_MIN_BACKOFF: int = 250 -DEFAULT_GRPC_DEADLINE: int = 10000 + +RST_STREAM = re.compile(r"\brst[^0-9a-zA-Z]stream\b", re.IGNORECASE | re.DOTALL) class _Method: @@ -60,40 +65,200 @@ class _ExecutionState(IntEnum): class _Executable(ABC): """ Abstract base class for all executable operations (transactions and queries). - + This class defines the core interface for operations that can be executed on the Hedera network. 
It provides implementations for configuration properties with validation (max_backoff, min_backoff, grpc_deadline) and includes the execution flow with retry logic. - + Subclasses like Transaction and Query will extend this and implement the abstract methods to define specific behavior for different types of operations. """ def __init__(self): - self._max_backoff = DEFAULT_MAX_BACKOFF - self._min_backoff = DEFAULT_MIN_BACKOFF - self._grpc_deadline = DEFAULT_GRPC_DEADLINE - self.node_account_id = None + self._max_attempts: Optional[int] = None + self._max_backoff: Optional[float] = None + self._min_backoff: Optional[float] = None + self._grpc_deadline: Optional[float] = None + self._request_timeout: Optional[float] = None + self.node_account_id: Optional[AccountId] = None self.node_account_ids: List[AccountId] = [] + self._used_node_account_id: Optional[AccountId] = None self._node_account_ids_index: int = 0 def set_node_account_ids(self, node_account_ids: List[AccountId]): - """Select node account IDs for sending the request.""" + """ + Explicitly set the node account IDs to execute against. + + Args: + node_account_ids (List[AccountId]): List of node account IDs + + Returns: + The current instance of the class for chaining. + """ self.node_account_ids = node_account_ids return self def set_node_account_id(self, node_account_id: AccountId): - """Convenience wrapper to set a single node account ID.""" + """ + Convenience method to set a single node account ID. + + Args: + node_account_id (AccountId): Node account ID + + Returns: + The current instance of the class for chaining. + """ return self.set_node_account_ids([node_account_id]) + def set_max_attempts(self, max_attempts: int): + """ + Set the maximum number of execution attempts. + + Args: + max_attempts (int): Maximum number of attempts. + Must be a positive integer. + + Returns: + The current instance of the class for chaining. 
+ """ + if isinstance(max_attempts, bool) or not isinstance(max_attempts, int): + raise TypeError( + f"max_attempts must be of type int, got {(type(max_attempts).__name__)}" + ) + + if max_attempts <= 0: + raise ValueError("max_attempts must be greater than 0") + + self._max_attempts = max_attempts + return self + + def set_grpc_deadline(self, grpc_deadline: Union[int, float]): + """ + Set the gRPC call deadline (per attempt). + + Args: + grpc_deadline (Union[int,float]): gRPC deadline in seconds. + Must be greater than zero. + + Returns: + The current instance of the class for chaining. + """ + if isinstance(grpc_deadline, bool) or not isinstance( + grpc_deadline, (float, int) + ): + raise TypeError( + f"grpc_deadline must be of type Union[int, float], got {type(grpc_deadline).__name__}" + ) + + if not math.isfinite(grpc_deadline) or grpc_deadline <= 0: + raise ValueError("grpc_deadline must be a finite value greater than 0") + + if self._request_timeout is not None and grpc_deadline > self._request_timeout: + warnings.warn( + "grpc_deadline should be smaller than request_timeout. " + "This configuration may cause operations to fail unexpectedly.", + UserWarning, + ) + + self._grpc_deadline = float(grpc_deadline) + return self + + def set_request_timeout(self, request_timeout: Union[int, float]): + """ + Set the total execution timeout for this operation. + + Args: + request_timeout (Union[int,float]: Total execution timeout in seconds. + Must be greater than zero. + + Returns: + The current instance of the class for chaining. 
+ """ + if isinstance(request_timeout, bool) or not isinstance( + request_timeout, (float, int) + ): + raise TypeError( + f"request_timeout must be of type Union[int, float], got {type(request_timeout).__name__}" + ) + + if not math.isfinite(request_timeout) or request_timeout <= 0: + raise ValueError("request_timeout must be a finite value greater than 0") + + if self._grpc_deadline is not None and request_timeout < self._grpc_deadline: + warnings.warn( + "request_timeout should be larger than grpc_deadline. " + "This configuration may cause operations to fail unexpectedly.", + UserWarning, + ) + + self._request_timeout = float(request_timeout) + return self + + def set_min_backoff(self, min_backoff: Union[int, float]): + """ + Set the minimum backoff delay between retries. + + Args: + min_backoff ((Union[int,float]): Minimum backoff delay in seconds. + Must be finite and non-negative. + + Returns: + The current instance of the class for chaining. + """ + if isinstance(min_backoff, bool) or not isinstance(min_backoff, (int, float)): + raise TypeError( + f"min_backoff must be of type int or float, got {(type(min_backoff).__name__)}" + ) + + if not math.isfinite(min_backoff) or min_backoff < 0: + raise ValueError("min_backoff must be a finite value >= 0") + + if self._max_backoff is not None and min_backoff > self._max_backoff: + raise ValueError("min_backoff cannot exceed max_backoff") + + self._min_backoff = float(min_backoff) + return self + + def set_max_backoff(self, max_backoff: Union[int, float]): + """ + Set the maximum backoff delay between retries. + + Args: + max_backoff (Union[int,float]): Maximum backoff delay in seconds. + Must be finite and greater than or equal to min_backoff. + + Returns: + The current instance of the class for chaining. 
+ """ + if isinstance(max_backoff, bool) or not isinstance(max_backoff, (int, float)): + raise TypeError( + f"max_backoff must be of type int or float, got {(type(max_backoff).__name__)}" + ) + + if not math.isfinite(max_backoff) or max_backoff < 0: + raise ValueError("max_backoff must be a finite value >= 0") + + if self._min_backoff is not None and max_backoff < self._min_backoff: + raise ValueError("max_backoff cannot be less than min_backoff") + + self._max_backoff = float(max_backoff) + return self + def _select_node_account_id(self) -> Optional[AccountId]: - """Pick the current node from the list if available, otherwise None.""" + """ + Select the next node account ID from node_account_ids in a round-robin fashion. + + Returns: + Selected AccountId or None if no nodes are configured + """ if self.node_account_ids: # Use modulo to cycle through the list - selected = self.node_account_ids[self._node_account_ids_index % len(self.node_account_ids)] + selected = self.node_account_ids[ + self._node_account_ids_index % len(self.node_account_ids) + ] self._used_node_account_id = selected return selected return None @@ -120,10 +285,10 @@ def _should_retry(self, response) -> _ExecutionState: def _map_status_error(self, response): """ Maps a response status code to an appropriate error object. - + Args: response: The response from the network - + Returns: Exception: An error object representing the error status """ @@ -166,19 +331,95 @@ def _map_response(self, response, node_id, proto_request): The appropriate response object for the operation """ raise NotImplementedError("_map_response must be implemented by subclasses") - + def _get_request_id(self): """ Format the request ID for the logger. """ return f"{self.__class__.__name__}:{time.time_ns()}" - def _execute(self, client: "Client"): + def _resolve_execution_config(self, client: "Client", timeout: Optional[Union[int, float]]) -> None: + """ + Resolve unset execution configuration from the Client defaults. 
+ """ + # Set request_timeout explicitly set via set_request_timeout() + # If not set use timeout passed to execute() + # Else clients default request_timeout + if self._request_timeout is None: + self._request_timeout = timeout + + defaults = ( + ("_min_backoff", client._min_backoff), + ("_max_backoff", client._max_backoff), + ("_grpc_deadline", client._grpc_deadline), + ("_request_timeout", client._request_timeout), + ("_max_attempts", client.max_attempts), + ) + + for attr, default in defaults: + if getattr(self, attr) is None: + setattr(self, attr, default) + + # nodes to which the executaion must be run against, if not provided used nodes from client + if not self.node_account_ids: + self.node_account_ids = [ + node._account_id for node in client.network._healthy_nodes + ] + + if not self.node_account_ids: + raise RuntimeError("No healthy nodes available for execution") + + def _should_retry_exponentially(self, err: Exception) -> bool: + """ + Determine whether a gRPC error represents a failure that should be + retried using exponential backoff. 
+ """ + if isinstance(err, grpc.RpcError): + return err.code() in ( + grpc.StatusCode.DEADLINE_EXCEEDED, + grpc.StatusCode.UNAVAILABLE, + grpc.StatusCode.RESOURCE_EXHAUSTED, + ) or ( + err.code() == grpc.StatusCode.INTERNAL + and bool(RST_STREAM.search(err.details())) + ) + + return True + + def _calculate_backoff(self, attempt: int): + """Calculate backoff for the given attempt, attempt start from 0""" + return min(self._max_backoff, self._min_backoff * (2 ** (attempt + 1))) + + def _handle_unhealthy_node(self, proto_request, attempt, logger, err) -> bool: + """Handle node switching and backoff for unhealthy node""" + # Check if the request is a transaction receipt or record because they are single node requests + if _is_transaction_receipt_or_record_request(proto_request): + _delay_for_attempt( + self._get_request_id(), + self._min_backoff, + attempt, + logger, + err, + ) + return True + + if self._node_account_ids_index == len(self.node_account_ids) - 1: + raise RuntimeError("All nodes are unhealthy") + + self._advance_node_index() + return True + + def _execute(self, client: "Client", timeout: Optional[Union[int, float]] = None): """ Execute a transaction or query with retry logic. Args: client (Client): The client instance to use for execution + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. + Precedence as follow: + 1. Explicitly set via set_request_timeout() + 2. Timeout passed to execute() + 3. 
Client default request_timeout Returns: The response from executing the operation: @@ -190,38 +431,34 @@ def _execute(self, client: "Client"): MaxAttemptsError: If the operation fails after the maximum number of attempts ReceiptStatusError: If the operation fails with a receipt status error """ - # Determine maximum number of attempts from client or executable - max_attempts = client.max_attempts - current_backoff = self._min_backoff + self._resolve_execution_config(client, timeout) + err_persistant = None - - tx_id = self.transaction_id if hasattr(self, "transaction_id") else None - + tx_id = getattr(self, "transaction_id", None) + logger = client.logger - - for attempt in range(max_attempts): - # Exponential backoff for retries - if attempt > 0 and current_backoff < self._max_backoff: - current_backoff *= 2 - - # Select preferred node if provided, fallback to client's default - selected = self._select_node_account_id() - - if selected is not None: - node = client.network._get_node(selected) - else: - node = client.network.current_node - - #Store for logging and receipts - self.node_account_id = node._account_id + start = time.monotonic() - # Advance to next node for the next attempt (if using explicit node list) - self._advance_node_index() + for attempt in range(self._max_attempts): + if time.monotonic() - start >= self._request_timeout: + break + + # Select node + node_id = self._select_node_account_id() + node = client.network._get_node(node_id) + + if node is None: + raise RuntimeError( + f"No node found for node_account_id: {self.node_account_id}" + ) + + # Store for logging and receipts + self.node_account_id = node._account_id # Create a channel wrapper from the client's channel channel = node._get_channel() - - logger.trace("Executing", "requestId", self._get_request_id(), "nodeAccountID", self.node_account_id, "attempt", attempt + 1, "maxAttempts", max_attempts) + + logger.trace("Executing", "requestId", self._get_request_id(), "nodeAccountID", 
self.node_account_id, "attempt", attempt + 1, "maxAttempts", self._max_attempts,) # Get the appropriate gRPC method to call method = self._get_method(channel) @@ -229,72 +466,104 @@ def _execute(self, client: "Client"): # Build the request using the executable's _make_request method proto_request = self._make_request() + if not node.is_healthy(): + self._handle_unhealthy_node( + proto_request, attempt, logger, err_persistant + ) + continue + + # Execute the GRPC call try: logger.trace("Executing gRPC call", "requestId", self._get_request_id()) - - # Execute the transaction method with the protobuf request - response = _execute_method(method, proto_request) - - # Map the response to an error - status_error = self._map_status_error(response) - - # Determine if we should retry based on the response - execution_state = self._should_retry(response) - - logger.trace(f"{self.__class__.__name__} status received", "nodeAccountID", self.node_account_id, "network", client.network.network, "state", execution_state.name, "txID", tx_id) - - # Handle the execution state - match execution_state: - case _ExecutionState.RETRY: - # If we should retry, wait for the backoff period and try again - err_persistant = status_error - # If not using explicit node list, switch to next node for retry - if not self.node_account_ids: - node = client.network._select_node() - logger.trace("Switched to a different node for retry", "error", err_persistant, "from node", self.node_account_id, "to node", node._account_id) - _delay_for_attempt(self._get_request_id(), current_backoff, attempt, logger, err_persistant) - continue - case _ExecutionState.EXPIRED: - raise status_error - case _ExecutionState.ERROR: - raise status_error - case _ExecutionState.FINISHED: - # If the transaction completed successfully, map the response and return it - logger.trace(f"{self.__class__.__name__} finished execution") - return self._map_response(response, self.node_account_id, proto_request) - except grpc.RpcError as e: - 
# Save the error - err_persistant = f"Status: {e.code()}, Details: {e.details()}" - # If not using explicit node list, switch to next node for retry - if not self.node_account_ids: - node = client.network._select_node() - logger.trace("Switched to a different node for the next attempt", "error", err_persistant, "from node", self.node_account_id, "to node", node._account_id) - _delay_for_attempt(self._get_request_id(), current_backoff, attempt, logger, err_persistant) - continue - - logger.error("Exceeded maximum attempts for request", "requestId", self._get_request_id(), "last exception being", err_persistant) - - raise MaxAttemptsError("Exceeded maximum attempts for request", self.node_account_id, err_persistant) + response = _execute_method(method, proto_request, self._grpc_deadline) + except Exception as e: + if not self._should_retry_exponentially(e): + raise e -def _delay_for_attempt(request_id: str, current_backoff: int, attempt: int, logger, error): + client.network._increase_backoff(node) + err_persistant = e + self._advance_node_index() + continue + + client.network._decrease_backoff(node) + + # Map the response to an error + status_error = self._map_status_error(response) + + # Determine if we should retry based on the response + execution_state = self._should_retry(response) + logger.trace(f"{self.__class__.__name__} status received", "nodeAccountID", self.node_account_id, "network", client.network.network, "state", execution_state.name, "txID", tx_id,) + + # Handle the execution state + match execution_state: + case _ExecutionState.RETRY: + if status_error.status == ResponseCode.INVALID_NODE_ACCOUNT: + client.network._increase_backoff(node) + # update nodes from the mirror_node + client.update_network() + + # If we should retry, wait for the backoff period and try again + err_persistant = status_error + _delay_for_attempt( + self._get_request_id(), + self._calculate_backoff(attempt), + attempt, + logger, + err_persistant, + ) + 
self._advance_node_index() + continue + case _ExecutionState.EXPIRED: + raise status_error + case _ExecutionState.ERROR: + raise status_error + case _ExecutionState.FINISHED: + # If the transaction completed successfully, map the response and return it + logger.trace(f"{self.__class__.__name__} finished execution") + return self._map_response( + response, self.node_account_id, proto_request + ) + + logger.error("Exceeded maximum attempts for request", "requestId", self._get_request_id(), "last exception being", err_persistant,) + raise MaxAttemptsError( + "Exceeded maximum attempts or request timeout", + self.node_account_id, + err_persistant, + ) + + +def _is_transaction_receipt_or_record_request( + request: Union[transaction_pb2.Transaction, query_pb2.Query], +) -> bool: + if not isinstance(request, query_pb2.Query): + return False + + return request.HasField("transactionGetReceipt") or request.HasField( + "transactionGetRecord" + ) + + +def _delay_for_attempt(request_id: str, backoff: float, attempt: int, logger: Logger, error) -> None: """ Delay for the specified backoff period before retrying. Args: attempt (int): The current attempt number (0-based) - current_backoff (int): The current backoff period in milliseconds + backoff (float): The current backoff period in seconds """ - logger.trace(f"Retrying request attempt", "requestId", request_id, "delay", current_backoff, "attempt", attempt, "error", error) - time.sleep(current_backoff * 0.001) + logger.trace("Retrying request attempt", "requestId", request_id, "delay", backoff, "attempt", attempt, "error", error,) + time.sleep(backoff) + -def _execute_method(method, proto_request): +def _execute_method(method, proto_request, timeout: float): """ Executes either a transaction or query method with the given protobuf request. 
Args: method (_Method): The method wrapper containing either a transaction or query function proto_request: The protobuf request object to pass to the method + timeout: The grpc deadline (timeout) in seconds Returns: The response from executing the method @@ -303,7 +572,7 @@ def _execute_method(method, proto_request): Exception: If neither a transaction nor query method is available to execute """ if method.transaction is not None: - return method.transaction(proto_request) + return method.transaction(proto_request, timeout=timeout) elif method.query is not None: - return method.query(proto_request) - raise Exception("No method to execute") \ No newline at end of file + return method.query(proto_request, timeout=timeout) + raise Exception("No method to execute") diff --git a/src/hiero_sdk_python/file/file_append_transaction.py b/src/hiero_sdk_python/file/file_append_transaction.py index dd2339e55..4e2ab4a93 100644 --- a/src/hiero_sdk_python/file/file_append_transaction.py +++ b/src/hiero_sdk_python/file/file_append_transaction.py @@ -14,7 +14,7 @@ """ import math -from typing import TYPE_CHECKING, Any, List, Optional +from typing import TYPE_CHECKING, Any, List, Optional, Union from hiero_sdk_python.file.file_id import FileId from hiero_sdk_python.hbar import Hbar from hiero_sdk_python.transaction.transaction import Transaction @@ -321,7 +321,7 @@ def freeze_with(self, client: "Client") -> FileAppendTransaction: return self - def execute(self, client: "Client") -> Any: + def execute(self, client: "Client", timeout: Optional[Union[int, float]] = None) -> Any: """ Executes the file append transaction. @@ -329,6 +329,7 @@ def execute(self, client: "Client") -> Any: Args: client: The client to execute the transaction with. + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. Returns: TransactionReceipt: The receipt from the first chunk execution. 
@@ -337,7 +338,7 @@ def execute(self, client: "Client") -> Any: if self.get_required_chunks() == 1: # Single chunk transaction - return super().execute(client) + return super().execute(client, timeout) # Multi-chunk transaction - execute all chunks responses = [] @@ -361,7 +362,7 @@ def execute(self, client: "Client") -> Any: super().sign(signing_key) # Execute the chunk - response = super().execute(client) + response = super().execute(client, timeout) responses.append(response) # Return the first response (as per JavaScript implementation) diff --git a/src/hiero_sdk_python/file/file_id.py b/src/hiero_sdk_python/file/file_id.py index bfc2e7271..c804258b2 100644 --- a/src/hiero_sdk_python/file/file_id.py +++ b/src/hiero_sdk_python/file/file_id.py @@ -106,6 +106,15 @@ def __str__(self) -> str: """ return f"{self.shard}.{self.realm}.{self.file}" + def __repr__(self) -> str: + """ + Returns a detailed representation of the FileId suitable for debugging. + + Returns: + str: A string in constructor format 'FileId(shard=X, realm=Y, file=Z)'. + """ + return f"FileId(shard={self.shard}, realm={self.realm}, file={self.file})" + def validate_checksum(self, client: Client) -> None: """ Validates the stored checksum against the calculated checksum using the provided client. 
diff --git a/src/hiero_sdk_python/node.py b/src/hiero_sdk_python/node.py index 68e0f93bd..7ffaf574f 100644 --- a/src/hiero_sdk_python/node.py +++ b/src/hiero_sdk_python/node.py @@ -1,6 +1,7 @@ import hashlib import socket -import ssl # Python's ssl module implements TLS (despite the name) +import ssl # Python's ssl module implements TLS (despite the name) +import time import grpc from typing import Optional from hiero_sdk_python.account.account_id import AccountId @@ -8,7 +9,6 @@ from hiero_sdk_python.address_book.node_address import NodeAddress from hiero_sdk_python.managed_node_address import _ManagedNodeAddress - # Timeout for fetching server certificates during TLS validation CERT_FETCH_TIMEOUT_SECONDS = 10 @@ -19,11 +19,11 @@ class _HederaTrustManager: Validates server certificates by comparing SHA-384 hashes of PEM-encoded certificates against expected hashes from the address book. """ - + def __init__(self, cert_hash: Optional[bytes], verify_certificate: bool): """ Initialize the trust manager. - + Args: cert_hash: Expected certificate hash from address book (UTF-8 encoded hex string) verify_certificate: Whether to enforce certificate verification @@ -38,53 +38,53 @@ def __init__(self, cert_hash: Optional[bytes], verify_certificate: bool): else: # Convert bytes to hex string (matching Java's String conversion) try: - self.cert_hash = cert_hash.decode('utf-8').strip().lower() - if self.cert_hash.startswith('0x'): + self.cert_hash = cert_hash.decode("utf-8").strip().lower() + if self.cert_hash.startswith("0x"): self.cert_hash = self.cert_hash[2:] except UnicodeDecodeError: self.cert_hash = cert_hash.hex().lower() - + def check_server_trusted(self, pem_cert: bytes) -> bool: """ Validate a server certificate by comparing its hash to the expected hash. 
- + Args: pem_cert: PEM-encoded certificate bytes - + Returns: True if certificate hash matches expected hash - + Raises: ValueError: If certificate hash doesn't match expected hash """ if self.cert_hash is None: return True - + # Compute SHA-384 hash of PEM certificate (matching Java implementation) cert_hash_bytes = hashlib.sha384(pem_cert).digest() actual_hash = cert_hash_bytes.hex().lower() - + if actual_hash != self.cert_hash: raise ValueError( f"Failed to confirm the server's certificate from a known address book. " f"Expected hash: {self.cert_hash}, received hash: {actual_hash}" ) - + return True class _Node: - + def __init__(self, account_id: AccountId, address: str, address_book: NodeAddress): """ Initialize a new Node instance. - + Args: account_id (AccountId): The account ID of the node. address (str): The address of the node. min_backoff (int): The minimum backoff time in seconds. """ - + self._account_id: AccountId = account_id self._channel: Optional[_Channel] = None self._address_book: NodeAddress = address_book @@ -92,11 +92,17 @@ def __init__(self, account_id: AccountId, address: str, address_book: NodeAddres self._verify_certificates: bool = True self._root_certificates: Optional[bytes] = None self._node_pem_cert: Optional[bytes] = None - + + self._min_backoff: float = 8 # seconds + self._max_backoff: float = 3600 # seconds + self._current_backoff: float = self._min_backoff + self._readmit_time: float = time.monotonic() + self._bad_grpc_response_count: int = 0 + def _close(self): """ Close the channel for this node. - + Returns: None """ @@ -107,13 +113,13 @@ def _close(self): def _get_channel(self): """ Get the channel for this node. - + Returns: _Channel: The channel for this node. 
""" if self._channel: return self._channel - + if self._address._is_transport_security(): if self._root_certificates: # Use the certificate that is provided @@ -124,23 +130,25 @@ def _get_channel(self): if not self._node_pem_cert: raise ValueError("No certificate available.") - + # Validate certificate if verification is enabled if self._verify_certificates: - self._validate_tls_certificate_with_trust_manager() - + self._validate_tls_certificate_with_trust_manager() + options = self._build_channel_options() credentials = grpc.ssl_channel_credentials( root_certificates=self._node_pem_cert, private_key=None, certificate_chain=None, ) - channel = grpc.secure_channel(str(self._address), credentials, options=options) + channel = grpc.secure_channel( + str(self._address), credentials, options=options + ) else: channel = grpc.insecure_channel(str(self._address)) - + self._channel = _Channel(channel) - + return self._channel def _apply_transport_security(self, enabled: bool): @@ -151,9 +159,9 @@ def _apply_transport_security(self, enabled: bool): return if not enabled and not self._address._is_transport_security(): return - + self._close() - + if enabled: self._address = self._address._to_secure() else: @@ -166,16 +174,16 @@ def _set_root_certificates(self, root_certificates: Optional[bytes]): self._root_certificates = root_certificates if self._channel and self._address._is_transport_security(): self._close() - + def _set_verify_certificates(self, verify: bool): """ Set whether TLS certificates should be verified. """ if self._verify_certificates == verify: return - + self._verify_certificates = verify - + if verify and self._channel and self._address._is_transport_security(): # Force channel recreation to ensure certificates are revalidated. self._close() @@ -188,15 +196,15 @@ def _build_channel_options(self): are intentionally set to a fixed value ("127.0.0.1") to bypass standard TLS hostname verification. 
- This is REQUIRED because Hedera nodes are connected to via IP addresses - from the address book, while their TLS certificates are not issued for - those IPs. As a result, standard hostname verification would fail even + This is REQUIRED because Hedera nodes are connected to via IP addresses + from the address book, while their TLS certificates are not issued for + those IPs. As a result, standard hostname verification would fail even for legitimate nodes. Although hostname verification is disabled, transport security is NOT weakened. Instead of relying on hostnames, the SDK validates the server - by performing certificate hash pinning. This guarantees the client is - communicating with the correct Hedera node regardless of the hostname + by performing certificate hash pinning. This guarantees the client is + communicating with the correct Hedera node regardless of the hostname or IP address used to connect. """ options = [ @@ -204,7 +212,7 @@ def _build_channel_options(self): ("grpc.ssl_target_name_override", "127.0.0.1"), ("grpc.keepalive_time_ms", 100000), ("grpc.keepalive_timeout_ms", 10000), - ("grpc.keepalive_permit_without_calls", 1) + ("grpc.keepalive_permit_without_calls", 1), ] return options @@ -214,23 +222,25 @@ def _validate_tls_certificate_with_trust_manager(self): Validate the remote TLS certificate using HederaTrustManager. This performs a pre-handshake validation by fetching the server certificate and comparing its hash to the expected hash from the address book. - + Note: If verification is enabled but no cert hash is available (e.g., in unit tests without address books), validation is skipped rather than raising an error. 
""" if not self._address._is_transport_security() or not self._verify_certificates: return - + cert_hash = None if self._address_book: # pylint: disable=protected-access - cert_hash = self._address_book._cert_hash # pylint: disable=protected-access - + cert_hash = ( + self._address_book._cert_hash + ) # pylint: disable=protected-access + # Skip validation if no cert hash is available (e.g., in unit tests) # This allows tests to run without address books while still enabling # verification in production where address books are available. if cert_hash is None or len(cert_hash) == 0: return - + # Create trust manager and validate certificate trust_manager = _HederaTrustManager(cert_hash, self._verify_certificates) trust_manager.check_server_trusted(self._node_pem_cert) @@ -241,7 +251,7 @@ def _normalize_cert_hash(cert_hash: bytes) -> str: Normalize the certificate hash to a lowercase hex string. """ try: - decoded = cert_hash.decode('utf-8').strip().lower() + decoded = cert_hash.decode("utf-8").strip().lower() if decoded.startswith("0x"): decoded = decoded[2:] @@ -252,7 +262,7 @@ def _normalize_cert_hash(cert_hash: bytes) -> str: def _fetch_server_certificate_pem(self) -> bytes: """ Perform a TLS handshake and retrieve the server certificate in PEM format. 
- + Returns: bytes: PEM-encoded certificate bytes """ @@ -266,7 +276,7 @@ def _fetch_server_certificate_pem(self) -> bytes: # Create TLS context that accepts any certificate (we validate hash ourselves) context = ssl.create_default_context() # Restrict SSL/TLS versions to TLSv1.2+ only for security - if hasattr(context, 'minimum_version') and hasattr(ssl, 'TLSVersion'): + if hasattr(context, "minimum_version") and hasattr(ssl, "TLSVersion"): context.minimum_version = ssl.TLSVersion.TLSv1_2 else: # Backwards compatibility for Python <3.7 that lacks minimum_version @@ -275,10 +285,38 @@ def _fetch_server_certificate_pem(self) -> bytes: context.check_hostname = False context.verify_mode = ssl.CERT_NONE - with socket.create_connection((host, port), timeout=CERT_FETCH_TIMEOUT_SECONDS) as sock: - with context.wrap_socket(sock, server_hostname=server_hostname) as tls_socket: + with socket.create_connection( + (host, port), timeout=CERT_FETCH_TIMEOUT_SECONDS + ) as sock: + with context.wrap_socket( + sock, server_hostname=server_hostname + ) as tls_socket: der_cert = tls_socket.getpeercert(True) # Convert DER to PEM format (matching Java's PEM encoding) - pem_cert = ssl.DER_cert_to_PEM_cert(der_cert).encode('utf-8') + pem_cert = ssl.DER_cert_to_PEM_cert(der_cert).encode("utf-8") return pem_cert + + def is_healthy(self) -> bool: + """ + Determine whether this node is currently eligible for use. + + A node is considered healthy if the current time is greater than or equal + to its scheduled readmission time (`_readmit_time`). Nodes + """ + return self._readmit_time <= time.monotonic() + + def _increase_backoff(self) -> None: + """ + Increase the node's backoff duration after a failure. 
+ """ + self._bad_grpc_response_count += 1 + self._current_backoff = min(self._current_backoff * 2, self._max_backoff) + self._readmit_time = time.monotonic() + self._current_backoff + + def _decrease_backoff(self) -> None: + """ + Decrease the node's backoff duration after a successful operation. + """ + self._current_backoff = max(self._current_backoff / 2, self._min_backoff) + diff --git a/src/hiero_sdk_python/query/account_balance_query.py b/src/hiero_sdk_python/query/account_balance_query.py index c4d4d4d85..ea91e0298 100644 --- a/src/hiero_sdk_python/query/account_balance_query.py +++ b/src/hiero_sdk_python/query/account_balance_query.py @@ -1,5 +1,6 @@ import traceback -from typing import Optional, Any +from typing import Optional, Any, Union +from hiero_sdk_python.client.client import Client from hiero_sdk_python.query.query import Query from hiero_sdk_python.hapi.services import crypto_get_account_balance_pb2, query_pb2 from hiero_sdk_python.account.account_id import AccountId @@ -133,7 +134,7 @@ def _get_method(self, channel: _Channel) -> _Method: transaction_func=None, query_func=channel.crypto.cryptoGetBalance ) - def execute(self, client) -> AccountBalance: + def execute(self, client: Client, timeout: Optional[Union[int, float]] = None) -> AccountBalance: """ Executes the account balance query. @@ -144,6 +145,7 @@ def execute(self, client) -> AccountBalance: Args: client (Client): The client instance to use for execution + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. 
Returns: AccountBalance: The account balance from the network @@ -154,7 +156,7 @@ def execute(self, client) -> AccountBalance: ReceiptStatusError: If the query fails with a receipt status error """ self._before_execute(client) - response = self._execute(client) + response = self._execute(client, timeout) return AccountBalance._from_proto(response.cryptogetAccountBalance) diff --git a/src/hiero_sdk_python/query/account_info_query.py b/src/hiero_sdk_python/query/account_info_query.py index 4d5e77964..0c74c2f6a 100644 --- a/src/hiero_sdk_python/query/account_info_query.py +++ b/src/hiero_sdk_python/query/account_info_query.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Union from hiero_sdk_python.query.query import Query from hiero_sdk_python.hapi.services import query_pb2, crypto_get_info_pb2 from hiero_sdk_python.executable import _Method @@ -90,7 +90,7 @@ def _get_method(self, channel: _Channel) -> _Method: query_func=channel.crypto.getAccountInfo ) - def execute(self, client): + def execute(self, client, timeout: Optional[Union[int, float]] = None): """ Executes the account info query. @@ -98,6 +98,7 @@ def execute(self, client): to return an AccountInfo object. This function delegates the core logic to `_execute()`, and may propagate exceptions raised by it. + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. 
Args: client (Client): The client instance to use for execution @@ -111,7 +112,7 @@ def execute(self, client): ReceiptStatusError: If the query fails with a receipt status error """ self._before_execute(client) - response = self._execute(client) + response = self._execute(client, timeout) return AccountInfo._from_proto(response.cryptoGetInfo.accountInfo) diff --git a/src/hiero_sdk_python/query/token_info_query.py b/src/hiero_sdk_python/query/token_info_query.py index 8e41f7c71..ffed0af2d 100644 --- a/src/hiero_sdk_python/query/token_info_query.py +++ b/src/hiero_sdk_python/query/token_info_query.py @@ -1,4 +1,4 @@ -from typing import Optional +from typing import Optional, Union from hiero_sdk_python.query.query import Query from hiero_sdk_python.hapi.services import query_pb2, token_get_info_pb2, response_pb2 from hiero_sdk_python.executable import _Method @@ -90,7 +90,7 @@ def _get_method(self, channel: _Channel) -> _Method: query_func=channel.token.getTokenInfo ) - def execute(self, client: Client) -> TokenInfo: + def execute(self, client: Client, timeout: Optional[Union[int, float]] = None) -> TokenInfo: """ Executes the token info query. @@ -101,6 +101,7 @@ def execute(self, client: Client) -> TokenInfo: Args: client (Client): The client instance to use for execution + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. 
Returns: TokenInfo: The token info from the network @@ -111,7 +112,7 @@ def execute(self, client: Client) -> TokenInfo: ReceiptStatusError: If the query fails with a receipt status error """ self._before_execute(client) - response = self._execute(client) + response = self._execute(client, timeout) return TokenInfo._from_proto(response.tokenGetInfo.tokenInfo) diff --git a/src/hiero_sdk_python/query/token_nft_info_query.py b/src/hiero_sdk_python/query/token_nft_info_query.py index 3140b1e12..6b19bbaf8 100644 --- a/src/hiero_sdk_python/query/token_nft_info_query.py +++ b/src/hiero_sdk_python/query/token_nft_info_query.py @@ -1,4 +1,4 @@ -from typing import Optional, Any +from typing import Optional, Any, Union from hiero_sdk_python.query.query import Query from hiero_sdk_python.hapi.services import query_pb2, response_pb2, token_get_nft_info_pb2 from hiero_sdk_python.executable import _Method @@ -88,7 +88,7 @@ def _get_method(self, channel: _Channel) -> _Method: query_func=channel.token.getTokenNftInfo ) - def execute(self, client: Client) -> TokenNftInfo: + def execute(self, client: Client, timeout: Optional[Union[int, float]] = None) -> TokenNftInfo: """ Executes the nft info query. @@ -99,6 +99,7 @@ def execute(self, client: Client) -> TokenNftInfo: Args: client (Client): The client instance to use for execution + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. 
Returns: TokenNftInfo: The token nft info from the network @@ -109,7 +110,7 @@ def execute(self, client: Client) -> TokenNftInfo: ReceiptStatusError: If the query fails with a receipt status error """ self._before_execute(client) - response = self._execute(client) + response = self._execute(client, timeout) return TokenNftInfo._from_proto(response.tokenGetNftInfo.nft) diff --git a/src/hiero_sdk_python/query/topic_info_query.py b/src/hiero_sdk_python/query/topic_info_query.py index cd6161609..e5c253220 100644 --- a/src/hiero_sdk_python/query/topic_info_query.py +++ b/src/hiero_sdk_python/query/topic_info_query.py @@ -1,4 +1,4 @@ -from typing import Optional, Any +from typing import Optional, Any, Union from hiero_sdk_python.query.query import Query from hiero_sdk_python.hapi.services import query_pb2, consensus_get_topic_info_pb2, response_pb2 from hiero_sdk_python.client.client import Client @@ -149,7 +149,7 @@ def _should_retry(self, response: Any) -> _ExecutionState: else: return _ExecutionState.ERROR - def execute(self, client: Client) -> TopicInfo: + def execute(self, client: Client, timeout: Optional[Union[int, float]] = None) -> TopicInfo: """ Executes the topic info query. @@ -160,6 +160,7 @@ def execute(self, client: Client) -> TopicInfo: Args: client (Client): The client instance to use for execution + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. 
Returns: TopicInfo: The topic info from the network @@ -170,7 +171,7 @@ def execute(self, client: Client) -> TopicInfo: ReceiptStatusError: If the query fails with a receipt status error """ self._before_execute(client) - response = self._execute(client) + response = self._execute(client, timeout) return TopicInfo._from_proto(response.consensusGetTopicInfo.topicInfo) diff --git a/src/hiero_sdk_python/query/transaction_get_receipt_query.py b/src/hiero_sdk_python/query/transaction_get_receipt_query.py index a35b605fe..fd4c76d17 100644 --- a/src/hiero_sdk_python/query/transaction_get_receipt_query.py +++ b/src/hiero_sdk_python/query/transaction_get_receipt_query.py @@ -263,7 +263,7 @@ def _map_receipt_list(self, receipts: List[transaction_receipt_pb2.TransactionR ] - def execute(self, client: Client) -> TransactionReceipt: + def execute(self, client: Client, timeout: Optional[Union[int, float]] = None) -> TransactionReceipt: """ Executes the transaction receipt query. @@ -274,6 +274,7 @@ def execute(self, client: Client) -> TransactionReceipt: Args: client (Client): The client instance to use for execution + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. 
Returns: TransactionReceipt: The transaction receipt from the network @@ -284,7 +285,7 @@ def execute(self, client: Client) -> TransactionReceipt: ReceiptStatusError: If the transaction receipt contains an error status """ self._before_execute(client) - response = self._execute(client) + response = self._execute(client, timeout) parent = TransactionReceipt._from_proto(response.transactionGetReceipt.receipt, self.transaction_id) if self.include_children: diff --git a/src/hiero_sdk_python/query/transaction_record_query.py b/src/hiero_sdk_python/query/transaction_record_query.py index 24aeeddf9..cb8183fd1 100644 --- a/src/hiero_sdk_python/query/transaction_record_query.py +++ b/src/hiero_sdk_python/query/transaction_record_query.py @@ -1,4 +1,5 @@ from typing import Optional, Any, Union +from hiero_sdk_python.client.client import Client from hiero_sdk_python.hapi.services import query_header_pb2, transaction_get_record_pb2, query_pb2 from hiero_sdk_python.query.query import Query from hiero_sdk_python.response_code import ResponseCode @@ -160,7 +161,7 @@ def _map_status_error(self, response: Any) -> Union[PrecheckError,ReceiptStatusE return ReceiptStatusError(status, self.transaction_id, TransactionReceipt._from_proto(receipt, self.transaction_id)) - def execute(self, client): + def execute(self, client: Client, timeout: Optional[Union[int, float]] = None): """ Executes the transaction record query. @@ -171,6 +172,7 @@ def execute(self, client): Args: client (Client): The client instance to use for execution + timeout (Optional[Union[int, float]): The total execution timeout (in seconds) for this execution. 
Returns: TransactionRecord: The transaction record from the network @@ -181,7 +183,7 @@ def execute(self, client): ReceiptStatusError: If the transaction record contains an error status """ self._before_execute(client) - response = self._execute(client) + response = self._execute(client, timeout) return TransactionRecord._from_proto(response.transactionGetRecord.transactionRecord, self.transaction_id) diff --git a/src/hiero_sdk_python/staking_info.py b/src/hiero_sdk_python/staking_info.py new file mode 100644 index 000000000..04998e62c --- /dev/null +++ b/src/hiero_sdk_python/staking_info.py @@ -0,0 +1,139 @@ +""" +StakingInfo class. +""" + +from dataclasses import dataclass +from typing import Optional + +from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.hapi.services.basic_types_pb2 import StakingInfo as StakingInfoProto +from hiero_sdk_python.hbar import Hbar +from hiero_sdk_python.timestamp import Timestamp + + +@dataclass(frozen=True) +class StakingInfo: + """ + Represents staking-related information for an account. + + Attributes: + decline_reward (Optional[bool]): Whether rewards are declined. + stake_period_start (Optional[Timestamp]): Start of the staking period. + pending_reward (Optional[Hbar]): Pending staking reward in Hbar. + staked_to_me (Optional[Hbar]): Amount staked to this account in Hbar. + staked_account_id (Optional[AccountId]): Account ID this account is staked to. + staked_node_id (Optional[int]): Node ID this account is staked to. 
+ """ + + decline_reward: Optional[bool] = None + stake_period_start: Optional[Timestamp] = None + pending_reward: Optional[Hbar] = None + staked_to_me: Optional[Hbar] = None + staked_account_id: Optional[AccountId] = None + staked_node_id: Optional[int] = None + + def __post_init__(self) -> None: + if self.staked_account_id is not None and self.staked_node_id is not None: + raise ValueError("Only one of staked_account_id or staked_node_id can be set.") + + @classmethod + def _from_proto(cls, proto: StakingInfoProto) -> "StakingInfo": + """ + Creates a StakingInfo instance from its protobuf representation. + """ + if proto is None: + raise ValueError("Staking info proto is None") + + decline_reward = proto.decline_reward + + stake_period_start = None + if proto.HasField("stake_period_start"): + stake_period_start = Timestamp._from_protobuf(proto.stake_period_start) + + pending_reward=Hbar.from_tinybars(proto.pending_reward) + staked_to_me=Hbar.from_tinybars(proto.staked_to_me) + + staked_account_id = None + if proto.HasField("staked_account_id"): + staked_account_id = AccountId._from_proto(proto.staked_account_id) + + staked_node_id = None + if proto.HasField("staked_node_id"): + staked_node_id = proto.staked_node_id + + return cls( + decline_reward=proto.decline_reward, + stake_period_start=stake_period_start, + pending_reward=pending_reward, + staked_to_me=staked_to_me, + staked_account_id=staked_account_id, + staked_node_id=staked_node_id, + ) + + def _to_proto(self) -> StakingInfoProto: + """ + Converts this StakingInfo instance to its protobuf representation. 
+ """ + proto = StakingInfoProto() + + if self.decline_reward is not None: + proto.decline_reward = bool(self.decline_reward) + if self.stake_period_start is not None: + proto.stake_period_start.CopyFrom(self.stake_period_start._to_protobuf()) + if self.pending_reward is not None: + proto.pending_reward = self.pending_reward.to_tinybars() + if self.staked_to_me is not None: + proto.staked_to_me = self.staked_to_me.to_tinybars() + if self.staked_account_id is not None: + proto.staked_account_id.CopyFrom(self.staked_account_id._to_proto()) + if self.staked_node_id is not None: + proto.staked_node_id = self.staked_node_id + + return proto + + @classmethod + def from_bytes(cls, data: bytes) -> "StakingInfo": + """ + Creates a StakingInfo instance from protobuf-encoded bytes. + """ + if not isinstance(data, bytes): + raise TypeError("data must be bytes") + if len(data) == 0: + raise ValueError("data cannot be empty") + + try: + proto = StakingInfoProto.FromString(data) + except Exception as exc: + raise ValueError(f"Failed to parse StakingInfo bytes: {exc}") from exc + + return cls._from_proto(proto) + + def to_bytes(self) -> bytes: + """ + Serializes this StakingInfo instance to protobuf-encoded bytes. 
+ """ + return self._to_proto().SerializeToString() + + def __str__(self) -> str: + return ( + "StakingInfo(\n" + f" decline_reward={self.decline_reward},\n" + f" stake_period_start={self.stake_period_start},\n" + f" pending_reward={self.pending_reward},\n" + f" staked_to_me={self.staked_to_me},\n" + f" staked_account_id={self.staked_account_id},\n" + f" staked_node_id={self.staked_node_id}\n" + ")" + ) + + def __repr__(self) -> str: + return ( + "StakingInfo(" + f"decline_reward={self.decline_reward!r}, " + f"stake_period_start={self.stake_period_start!r}, " + f"pending_reward={self.pending_reward!r}, " + f"staked_to_me={self.staked_to_me!r}, " + f"staked_account_id={self.staked_account_id!r}, " + f"staked_node_id={self.staked_node_id!r}" + ")" + ) diff --git a/src/hiero_sdk_python/tokens/assessed_custom_fee.py b/src/hiero_sdk_python/tokens/assessed_custom_fee.py new file mode 100644 index 000000000..2866f93b5 --- /dev/null +++ b/src/hiero_sdk_python/tokens/assessed_custom_fee.py @@ -0,0 +1,98 @@ +from dataclasses import dataclass, field +from typing import Optional + +from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.hapi.services.custom_fees_pb2 import ( + AssessedCustomFee as AssessedCustomFeeProto, +) +from hiero_sdk_python.tokens.token_id import TokenId + + +@dataclass +class AssessedCustomFee: + """Assessed custom fee information included in transaction records. + + This class represents fees assessed due to custom fee schedules on tokens or + topics. It appears in `TransactionRecord.assessed_custom_fees` (repeated field). 
+ + Example: + Suppose you have a TransactionRecord from getTransactionRecord(): + + record = client.get_transaction_record(tx_id) + + for fee in record.assessed_custom_fees: + if fee.token_id is None: + print(f"HBAR fee of {fee.amount} tinybars collected by {fee.fee_collector_account_id}") + else: + print(f"Token fee of {fee.amount} units of {fee.token_id} " + f"collected by {fee.fee_collector_account_id}, " + f"paid by {', '.join(str(p) for p in fee.effective_payer_account_ids)}") + """ + + amount: int + """The amount of the fee assessed, in the smallest units of the token (or tinybars for HBAR).""" + + token_id: Optional[TokenId] = None + """The ID of the token used to pay the fee; None if paid in HBAR.""" + + fee_collector_account_id: Optional[AccountId] = None + """The account ID that collects/receives this assessed custom fee (required field).""" + + effective_payer_account_ids: list[AccountId] = field(default_factory=list) + """The list of accounts that effectively paid this assessed fee (repeated field).""" + + def __post_init__(self) -> None: + if self.fee_collector_account_id is None: + raise ValueError( + "fee_collector_account_id is required for AssessedCustomFee" + ) + + @classmethod + def _from_proto(cls, proto: AssessedCustomFeeProto) -> "AssessedCustomFee": + """Create an AssessedCustomFee instance from the protobuf message.""" + token_id = ( + TokenId._from_proto(proto.token_id) if proto.HasField("token_id") else None + ) + + if not proto.HasField("fee_collector_account_id"): + raise ValueError( + "fee_collector_account_id is required in AssessedCustomFee proto" + ) + + return cls( + amount=proto.amount, + token_id=token_id, + fee_collector_account_id=AccountId._from_proto( + proto.fee_collector_account_id + ), + effective_payer_account_ids=[ + AccountId._from_proto(payer_proto) + for payer_proto in proto.effective_payer_account_id + ], + ) + + def _to_proto(self) -> AssessedCustomFeeProto: + """Convert this AssessedCustomFee instance back to a 
protobuf message.""" + proto = AssessedCustomFeeProto( + amount=self.amount, + fee_collector_account_id=self.fee_collector_account_id._to_proto(), + ) + + if self.token_id is not None: + proto.token_id.CopyFrom(self.token_id._to_proto()) + + for payer in self.effective_payer_account_ids: + proto.effective_payer_account_id.append(payer._to_proto()) + + return proto + + def __str__(self) -> str: + """Returns a human-readable string representation.""" + return ( + f"AssessedCustomFee(" + f"amount={self.amount}, " + f"token_id={self.token_id}, " + f"fee_collector_account_id={self.fee_collector_account_id}, " + f"effective_payer_account_ids={self.effective_payer_account_ids}" + f")" + ) diff --git a/src/hiero_sdk_python/transaction/transaction.py b/src/hiero_sdk_python/transaction/transaction.py index 5304c8c61..5fb589aac 100644 --- a/src/hiero_sdk_python/transaction/transaction.py +++ b/src/hiero_sdk_python/transaction/transaction.py @@ -1,12 +1,11 @@ import hashlib -from typing import List, Optional +from typing import List, Optional, Union from typing import TYPE_CHECKING from hiero_sdk_python.account.account_id import AccountId from hiero_sdk_python.client.client import Client -from hiero_sdk_python.crypto.private_key import PrivateKey from hiero_sdk_python.exceptions import PrecheckError from hiero_sdk_python.executable import _Executable, _ExecutionState from hiero_sdk_python.hapi.services import (basic_types_pb2, transaction_pb2, transaction_contents_pb2) @@ -19,6 +18,7 @@ from hiero_sdk_python.utils.key_utils import Key, key_to_proto if TYPE_CHECKING: + from hiero_sdk_python.crypto.private_key import PrivateKey from hiero_sdk_python.schedule.schedule_create_transaction import ( ScheduleCreateTransaction, ) @@ -103,7 +103,7 @@ def _map_response( ValueError: If proto_request is not a Transaction """ if not isinstance(proto_request, transaction_pb2.Transaction): - return ValueError(f"Expected Transaction but got {type(proto_request)}") + raise TypeError(f"Expected 
Transaction but got {type(proto_request)}") hash_obj = hashlib.sha384() hash_obj.update(proto_request.signedTransactionBytes) @@ -138,6 +138,7 @@ def _should_retry(self, response): ResponseCode.PLATFORM_TRANSACTION_NOT_CREATED, ResponseCode.PLATFORM_NOT_ACTIVE, ResponseCode.BUSY, + ResponseCode.INVALID_NODE_ACCOUNT } if status in retryable_statuses: @@ -166,7 +167,7 @@ def _map_status_error(self, response): return PrecheckError(error_code, tx_id) - def sign(self, private_key): + def sign(self, private_key: "PrivateKey") -> "Transaction": """ Signs the transaction using the provided private key. @@ -262,11 +263,19 @@ def freeze(self): if self.transaction_id is None: raise ValueError("Transaction ID must be set before freezing. Use freeze_with(client) or set_transaction_id().") - if self.node_account_id is None: - raise ValueError("Node account ID must be set before freezing. Use freeze_with(client) or manually set node_account_id.") + if self.node_account_id is None and len(self.node_account_ids) == 0: + raise ValueError("Node account ID must be set before freezing. 
Use freeze_with(client) or manually set node_account_ids.") + # Populate node_account_ids for backward compatibility + if self.node_account_id: + self.set_node_account_id(self.node_account_id) + self._transaction_body_bytes[self.node_account_id] = self.build_transaction_body().SerializeToString() + return self + # Build the transaction body for the single node - self._transaction_body_bytes[self.node_account_id] = self.build_transaction_body().SerializeToString() + for node_account_id in self.node_account_ids: + self.node_account_id = node_account_id + self._transaction_body_bytes[node_account_id] = self.build_transaction_body().SerializeToString() return self @@ -293,21 +302,34 @@ def freeze_with(self, client): # For each node, set the node_account_id and build the transaction body # This allows the transaction to be submitted to any node in the network - if self.batch_key is None: - for node in client.network.nodes: - self.node_account_id = node._account_id - self._transaction_body_bytes[node._account_id] = self.build_transaction_body().SerializeToString() - - # Set the node account id to the current node in the network - self.node_account_id = client.network.current_node._account_id - else: + if self.batch_key: # For Inner Transaction of batch transaction node_account_id=0.0.0 self.node_account_id = AccountId(0,0,0) self._transaction_body_bytes[AccountId(0,0,0)] = self.build_transaction_body().SerializeToString() + return self + # Single node + if self.node_account_id: + self.set_node_account_id(self.node_account_id) + self._transaction_body_bytes[self.node_account_id] = self.build_transaction_body().SerializeToString() + return self + + # Multiple node + if len(self.node_account_ids) > 0: + for node_account_id in self.node_account_ids: + self.node_account_id = node_account_id + self._transaction_body_bytes[node_account_id] = self.build_transaction_body().SerializeToString() + + else: + # Use all nodes from client network + for node in client.network.nodes: + 
self.node_account_id = node._account_id + self._transaction_body_bytes[node._account_id] = self.build_transaction_body().SerializeToString() + return self + - def execute(self, client): + def execute(self, client, timeout: Optional[Union[int, float]] = None): """ Executes the transaction on the Hedera network using the provided client. @@ -315,6 +337,7 @@ Args: client (Client): The client instance to use for execution. + timeout (Optional[Union[int, float]]): The total execution timeout (in seconds) for this execution. Returns: TransactionReceipt: The receipt of the transaction. @@ -338,7 +361,7 @@ def execute(self, client): self.sign(client.operator_private_key) # Call the _execute function from executable.py to handle the actual execution - response = self._execute(client) + response = self._execute(client, timeout) response.validate_status = True response.transaction = self @@ -415,12 +438,13 @@ def build_base_transaction_body(self) -> transaction_pb2.TransactionBody: transaction_id_proto = self.transaction_id._to_proto() - if self.node_account_id is None: + selected_node = self.node_account_id or (self.node_account_ids[0] if self.node_account_ids else None) + if selected_node is None: raise ValueError("Node account ID is not set.") transaction_body = transaction_pb2.TransactionBody() transaction_body.transactionID.CopyFrom(transaction_id_proto) - transaction_body.nodeAccountID.CopyFrom(self.node_account_id._to_proto()) + transaction_body.nodeAccountID.CopyFrom(selected_node._to_proto()) fee = self.transaction_fee or self._default_transaction_fee if hasattr(fee, "to_tinybars"): @@ -544,7 +568,7 @@ def set_transaction_id(self, transaction_id: TransactionId): self.transaction_id = transaction_id return self - def to_bytes(self): + def to_bytes(self) -> bytes: """ Serializes the frozen transaction into its protobuf-encoded byte representation. 
@@ -574,7 +598,7 @@ def to_bytes(self): ``` Returns: - bytes: The protobuf-encoded transaction bytes. + bytes: The serialized transaction as bytes. Raises: Exception: If the transaction has not been frozen yet. @@ -801,6 +825,8 @@ def _from_protobuf(cls, transaction_body, body_bytes: bytes, sig_map): ] if transaction.node_account_id: + # restore for the original frozen node + transaction.set_node_account_id(transaction.node_account_id) transaction._transaction_body_bytes[transaction.node_account_id] = body_bytes if sig_map and sig_map.sigPair: diff --git a/src/hiero_sdk_python/utils/crypto_utils.py b/src/hiero_sdk_python/utils/crypto_utils.py index 90a79597e..43dab440c 100644 --- a/src/hiero_sdk_python/utils/crypto_utils.py +++ b/src/hiero_sdk_python/utils/crypto_utils.py @@ -56,8 +56,21 @@ def decompress_point(data: bytes) -> Tuple[int, int]: def compress_with_cryptography(encoded: bytes) -> bytes: """ - Takes either a 33-byte compressed or 65-byte uncompressed, - returns a 33-byte compressed via cryptography. + Compress an elliptic curve public key to SEC1 compressed format. + + Accepts either a 33-byte compressed or 65-byte uncompressed secp256k1 + public key and returns the 33-byte compressed representation using + the cryptography library. + + Args: + encoded: A secp256k1 public key in either compressed (33 bytes) + or uncompressed (65 bytes) SEC1 format. + + Returns: + bytes: The 33-byte compressed public key. + + Raises: + ValueError: If the input is not a valid SEC1 encoded point. 
""" pub = ec.EllipticCurvePublicKey.from_encoded_point(SECP256K1_CURVE, encoded) compressed = pub.public_bytes( diff --git a/src/hiero_sdk_python/utils/entity_id_helper.py b/src/hiero_sdk_python/utils/entity_id_helper.py index f85dcf28b..03f9a5134 100644 --- a/src/hiero_sdk_python/utils/entity_id_helper.py +++ b/src/hiero_sdk_python/utils/entity_id_helper.py @@ -1,6 +1,8 @@ import re +import struct +import requests -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Any, Dict if TYPE_CHECKING: from hiero_sdk_python.client.client import Client @@ -11,15 +13,20 @@ P3 = 26**3 P5 = 26**5 -def parse_from_string(address: str): +def parse_from_string(address: str) -> tuple[str, str, str, str | None]: """ Parse an address string of the form: ..[-] + + Args: + address: The entity ID string to parse. + Examples: "0.0.123" "0.0.123-abcde" Returns: - An instance of cls with shard, realm, num, and optional checksum. + tuple[str, str, str, str | None]: A tuple of (shard, realm, num, checksum) + where checksum is None if not present in the input string. """ match = ID_REGEX.match(address) if not match: @@ -29,21 +36,22 @@ def parse_from_string(address: str): return shard, realm, num, checksum + def generate_checksum(ledger_id: bytes, address: str) -> str: """ Compute the 5-character checksum for a Hiero entity ID string (HIP-15). - + Args: ledger_id: The ledger identifier as raw bytes (e.g., b"\x00" for mainnet). address: A string of the form "shard.realm.num" (e.g., "0.0.123"). - + Returns: A 5-letter checksum string (e.g., "kfmza"). """ # Convert "0.0.123" into a digit list with '.' 
as 10 d = [] for ch in address: - if ch == '.': + if ch == ".": d.append(10) else: d.append(int(ch)) @@ -51,7 +59,7 @@ def generate_checksum(ledger_id: bytes, address: str) -> str: # Initialize running sums sd0 = 0 # sum of digits at even indices mod 11 sd1 = 0 # sum of digits at odd indices mod 11 - sd = 0 # weight sum of all position mod P3 + sd = 0 # weight sum of all position mod P3 for i in range(len(d)): sd = (sd * 31 + d[i]) % P3 @@ -74,12 +82,15 @@ def generate_checksum(ledger_id: bytes, address: str) -> str: letter = [] for _ in range(5): - letter.append(chr(ord('a') + (cp % 26))) + letter.append(chr(ord("a") + (cp % 26))) cp //= 26 return "".join(reversed(letter)) -def validate_checksum(shard: int, realm: int, num: int, checksum: str | None, client: "Client") -> None: + +def validate_checksum( + shard: int, realm: int, num: int, checksum: str | None, client: "Client" +) -> None: """ Validate a Hiero entity ID checksum against the current client's ledger. @@ -107,13 +118,17 @@ def validate_checksum(shard: int, realm: int, num: int, checksum: str | None, cl if expected_checksum != checksum: raise ValueError(f"Checksum mismatch for {address}") + def format_to_string(shard: int, realm: int, num: int) -> str: """ Convert an entity ID into its standard string representation. """ return f"{shard}.{realm}.{num}" -def format_to_string_with_checksum(shard: int, realm: int, num: int, client: "Client") -> str: + +def format_to_string_with_checksum( + shard: int, realm: int, num: int, client: "Client" +) -> str: """ Convert an entity ID into its string representation with checksum. 
""" @@ -123,3 +138,36 @@ def format_to_string_with_checksum(shard: int, realm: int, num: int, client: "Cl base_str = format_to_string(shard, realm, num) return f"{base_str}-{generate_checksum(ledger_id, format_to_string(shard, realm, num))}" + + +def perform_query_to_mirror_node(url: str, timeout: float = 10) -> Dict[str, Any]: + """Perform a GET request to the Hedera Mirror Node REST API.""" + if not isinstance(url, str) or not url: + raise ValueError("url must be a non-empty string") + + try: + response: requests.Response = requests.get(url, timeout=timeout) + response.raise_for_status() + + return response.json() + + except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e: + raise RuntimeError(f"Mirror node request failed for {url}: {e}") from e + + except requests.exceptions.Timeout as e: + raise RuntimeError(f"Mirror node request timed out for {url}") from e + + except requests.RequestException as e: + raise RuntimeError(f"Unexpected error while querying mirror node: {url}") from e + + +def to_solidity_address(shard: int, realm: int, num: int) -> str: + """Convert entity ID components to a 20-byte Solidity-style address (long-zero format).""" + # Check shard fits in 32-bit range + if shard.bit_length() > 31: + raise ValueError(f"shard out of 32-bit range {shard}") + + # Pack into 20 bytes: shard(4 bytes), realm(8 bytes), num(8 bytes) (big-endian) + raw = struct.pack(">iqq", shard, realm, num) + + return raw.hex() diff --git a/tests/integration/account_id_population_e2e_test.py b/tests/integration/account_id_population_e2e_test.py new file mode 100644 index 000000000..7dc149499 --- /dev/null +++ b/tests/integration/account_id_population_e2e_test.py @@ -0,0 +1,108 @@ +""" +Integration tests for AccountId. 
+""" + +import pytest + +from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.crypto.private_key import PrivateKey +from hiero_sdk_python.hbar import Hbar +from hiero_sdk_python.query.transaction_get_receipt_query import ( + TransactionGetReceiptQuery, +) +from hiero_sdk_python.transaction.transfer_transaction import TransferTransaction +from tests.integration.utils import env, wait_for_mirror_node + + +@pytest.fixture +def evm_address(): + """Returns an evm_address.""" + private_key = PrivateKey.generate_ecdsa() + public_key = private_key.public_key() + + return public_key.to_evm_address() + + +@pytest.mark.integration +def test_populate_account_id_num(env, evm_address): + """Test populate AccountId num from mirror node.""" + evm_address_account = AccountId.from_evm_address(evm_address, 0, 0) + + # Auto account creation by doing transfer to an evm_address + transfer_tx = ( + TransferTransaction() + .add_hbar_transfer(evm_address_account, Hbar(1).to_tinybars()) + .add_hbar_transfer(env.operator_id, Hbar(-1).to_tinybars()) + ) + transfer_tx.execute(env.client) + + transfer_receipt = ( + TransactionGetReceiptQuery() + .set_transaction_id(transfer_tx.transaction_id) + .set_include_children(True) + .execute(env.client) + ) + + assert transfer_receipt is not None + assert ( + len(transfer_receipt.children) > 0 + ), "Expected child transaction for auto-account creation" + + created_account_id = transfer_receipt.children[0].account_id + assert created_account_id is not None, f"AccountId not found in child transaction: {transfer_receipt.children[0]}" + + mirror_account_id = AccountId.from_evm_address(evm_address, 0, 0) + assert mirror_account_id.num == 0 + + # Wait for mirrornode to update + resolved_account_id = wait_for_mirror_node( + fn=lambda: mirror_account_id.populate_account_num(env.client), + predicate=lambda acc: acc.num != 0, + ) + + assert resolved_account_id.evm_address == mirror_account_id.evm_address + assert 
resolved_account_id.shard == created_account_id.shard + assert resolved_account_id.realm == created_account_id.realm + assert resolved_account_id.num == created_account_id.num + + +@pytest.mark.integration +def test_populate_account_id_evm_address(env, evm_address): + """Test populate AccountId evm address from mirror node.""" + evm_address_account = AccountId.from_evm_address(evm_address, 0, 0) + + # Auto account creation by doing transfer to an evm_address + transfer_tx = ( + TransferTransaction() + .add_hbar_transfer(evm_address_account, Hbar(1).to_tinybars()) + .add_hbar_transfer(env.operator_id, Hbar(-1).to_tinybars()) + ) + transfer_tx.execute(env.client) + + transfer_receipt = ( + TransactionGetReceiptQuery() + .set_transaction_id(transfer_tx.transaction_id) + .set_include_children(True) + .execute(env.client) + ) + + assert transfer_receipt is not None + assert ( + len(transfer_receipt.children) > 0 + ), "Expected child transaction for auto-account creation" + + created_account_id = transfer_receipt.children[0].account_id + assert created_account_id is not None, f"AccountId not found in child transaction: {transfer_receipt.children[0]}" + + assert created_account_id.evm_address is None + + # Wait for mirror_node to update + resolved_account_id = wait_for_mirror_node( + fn=lambda: created_account_id.populate_evm_address(env.client), + predicate=lambda acc: acc.evm_address is not None, + ) + + assert resolved_account_id.shard == created_account_id.shard + assert resolved_account_id.realm == created_account_id.realm + assert resolved_account_id.num == created_account_id.num + assert resolved_account_id.evm_address == evm_address diff --git a/tests/integration/contract_id_population_e2e_test.py b/tests/integration/contract_id_population_e2e_test.py new file mode 100644 index 000000000..848e2ec9f --- /dev/null +++ b/tests/integration/contract_id_population_e2e_test.py @@ -0,0 +1,69 @@ +""" +Integration tests for ContractId. 
+""" + +import pytest + +from examples.contract.contracts.contract_utils import ( + CONTRACT_DEPLOY_GAS, + STATEFUL_CONTRACT_BYTECODE, +) +from hiero_sdk_python.contract.contract_create_transaction import ( + ContractCreateTransaction, +) +from hiero_sdk_python.contract.contract_function_parameters import ( + ContractFunctionParameters, +) +from hiero_sdk_python.contract.contract_id import ContractId +from hiero_sdk_python.contract.contract_info_query import ContractInfoQuery +from hiero_sdk_python.file.file_create_transaction import FileCreateTransaction +from hiero_sdk_python.response_code import ResponseCode +from tests.integration.utils import env, wait_for_mirror_node + + +@pytest.mark.integration +def test_populate_contract_id_num(env): + """Test populate ContractId num from mirror node.""" + # Create a contract transaction + file_receipt = ( + FileCreateTransaction() + .set_keys(env.operator_key.public_key()) + .set_contents(STATEFUL_CONTRACT_BYTECODE) + .set_file_memo("integration test contract") + .execute(env.client) + ) + assert file_receipt.status == ResponseCode.SUCCESS + file_id = file_receipt.file_id + assert file_id is not None + + constructor_params = ContractFunctionParameters().add_bytes32( + b"Initial message from constructor" + ) + contract_receipt = ( + ContractCreateTransaction() + .set_admin_key(env.operator_key.public_key()) + .set_gas(CONTRACT_DEPLOY_GAS) + .set_constructor_parameters(constructor_params) + .set_bytecode_file_id(file_id) + .execute(env.client) + ) + assert contract_receipt.status == ResponseCode.SUCCESS + + created_contract_id = contract_receipt.contract_id + assert created_contract_id is not None + + # Query contract info to get evm_address + info = ContractInfoQuery().set_contract_id(created_contract_id).execute(env.client) + contract_with_evm = ContractId.from_evm_address(0, 0, info.contract_account_id) + assert contract_with_evm.contract == 0 + + # Wait for mirror_node to update + resolved_contract_id = 
wait_for_mirror_node( + fn=lambda: contract_with_evm.populate_contract_num(env.client), + predicate=lambda contract: contract.contract != 0, + ) + + assert resolved_contract_id.shard == created_contract_id.shard + assert resolved_contract_id.realm == created_contract_id.realm + assert resolved_contract_id.contract == created_contract_id.contract + assert resolved_contract_id.evm_address == contract_with_evm.evm_address diff --git a/tests/integration/transaction_freeze_e2e_test.py b/tests/integration/transaction_freeze_e2e_test.py new file mode 100644 index 000000000..0522e3d6a --- /dev/null +++ b/tests/integration/transaction_freeze_e2e_test.py @@ -0,0 +1,148 @@ +import pytest + +from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.client.client import Client +from hiero_sdk_python.consensus.topic_create_transaction import TopicCreateTransaction +from hiero_sdk_python.crypto.private_key import PrivateKey +from hiero_sdk_python.response_code import ResponseCode +from hiero_sdk_python.transaction.transaction import Transaction +from hiero_sdk_python.transaction.transaction_id import TransactionId +from tests.integration.utils import env + +@pytest.mark.integration +def test_transaction_executes_successfully(env): + """Test transaction can be executed successfully.""" + executor_client = env.client + executor_key = env.operator_key + + tx = TopicCreateTransaction().set_memo("Test Topic Creation") + tx.freeze_with(executor_client) + tx.sign(executor_key) + receipt = tx.execute(executor_client) + + # Verify that the transaction_bodys are generated for all nodes present in client network + assert len(tx._transaction_body_bytes) == len(env.client.network.nodes) + assert set(tx._transaction_body_bytes.keys()) == {node._account_id for node in env.client.network.nodes} + + assert receipt.status == ResponseCode.SUCCESS, "Transaction must execute successfully" + +@pytest.mark.integration +def 
test_transaction_executes_successfully_with_node_account_ids(env): + """Test transaction can be executed successfully when node_account_ids are provided.""" + node_account_ids = [AccountId(0,0,3), AccountId(0,0,4)] + executor_client = env.client + executor_key = env.operator_key + + tx = TopicCreateTransaction().set_memo("Test Topic Creation") + tx.set_node_account_ids(node_account_ids) + tx.freeze_with(executor_client) + tx.sign(executor_key) + + # Verify that the transaction_bodys are generated for the provided node_account_ids only + assert len(tx._transaction_body_bytes) == 2 + assert set(tx._transaction_body_bytes.keys()) == set(node_account_ids) + + receipt = tx.execute(executor_client) + assert receipt.status == ResponseCode.SUCCESS, "Transaction must execute successfully" + +@pytest.mark.integration +def test_transaction_executes_successfully_with_single_node_account_id(env): + """Test transaction can be executed successfully when single node_account_id are provided.""" + node_account_id = AccountId(0,0,3) + executor_client = env.client + executor_key = env.operator_key + + tx = TopicCreateTransaction().set_memo("Test Topic Creation") + tx.set_node_account_id(node_account_id) + tx.freeze_with(executor_client) + tx.sign(executor_key) + + # Verify that the transaction_bodys are generated for the provided node_account_id only + assert len(tx._transaction_body_bytes) == 1 + assert set(tx._transaction_body_bytes.keys()) == {node_account_id} + + receipt = tx.execute(executor_client) + assert receipt.status == ResponseCode.SUCCESS, "Transaction must execute successfully" + +@pytest.mark.integration +def test_transaction_executes_successfully_after_manual_freeze(env): + """Test transaction can be manually frozen and then executed successfully.""" + executor_client = env.client + executor_key = env.operator_key + + tx = TopicCreateTransaction().set_memo("Test Topic Creation") + tx_id = TransactionId.generate(executor_client.operator_account_id) + + # Manually set 
Node and ID + tx.set_transaction_id(tx_id) + tx.node_account_id = AccountId.from_string("0.0.3") # Explicitly set to 0.0.3 + + # Manual Freeze (Generates body ONLY for 0.0.3) + tx.freeze() + unsigned_bytes = tx.to_bytes() + + assert unsigned_bytes is not None + + tx2 = Transaction.from_bytes(unsigned_bytes) + assert tx2 is not None + + tx2.sign(executor_key) + receipt = tx2.execute(executor_client) + + assert receipt.status == ResponseCode.SUCCESS, "Transaction must execute successfully" + +@pytest.mark.integration +def test_transaction_with_secondary_client_can_execute_successfully(env): + """Test transaction created by the secondary client and then executed successfully.""" + executor_client = env.client + executor_key = env.operator_key + + tx_freezer_account = env.create_account(1) + + # Secondary Client + tx_freezer_client = Client(network=env.client.network) + tx_freezer_client.set_operator(tx_freezer_account.id, tx_freezer_account.key) + + tx = TopicCreateTransaction().set_memo("Test Topic Creation") + tx_id = TransactionId.generate(executor_client.operator_account_id) + + tx.set_transaction_id(tx_id) + tx.freeze_with(tx_freezer_client) + + unsigned_bytes = tx.to_bytes() + assert unsigned_bytes is not None + + tx2 = Transaction.from_bytes(unsigned_bytes) + assert tx2 is not None + + tx2.sign(executor_key) + receipt = tx2.execute(executor_client) + + assert receipt.status == ResponseCode.SUCCESS, "Transaction must execute successfully" + +@pytest.mark.integration +def test_transaction_with_secondary_client_without_operator_can_execute_successfully(env): + """Test transaction created by the secondary client without operator and then executed successfully.""" + executor_client = env.client + executor_key = env.operator_key + + # Secondary Client with no operator account set + tx_freezer_client = Client(network=env.client.network) + + tx = TopicCreateTransaction().set_memo("Test Topic Creation") + tx_id = 
TransactionId.generate(executor_client.operator_account_id) + + tx.set_transaction_id(tx_id) + tx.freeze_with(tx_freezer_client) + + unsigned_bytes = tx.to_bytes() + assert unsigned_bytes is not None + + tx2 = Transaction.from_bytes(unsigned_bytes) + assert tx2 is not None + + tx2.sign(executor_key) + receipt = tx2.execute(executor_client) + + assert receipt.status == ResponseCode.SUCCESS, "Transaction must execute successfully" + diff --git a/tests/integration/utils.py b/tests/integration/utils.py index b814ab1ba..87c1ff573 100644 --- a/tests/integration/utils.py +++ b/tests/integration/utils.py @@ -1,8 +1,9 @@ import os +import time from pytest import fixture from dotenv import load_dotenv from dataclasses import dataclass -from typing import Optional +from typing import Callable, Optional, TypeVar from hiero_sdk_python.account.account_id import AccountId from hiero_sdk_python.client.client import Client from hiero_sdk_python.client.network import Network @@ -11,14 +12,23 @@ from hiero_sdk_python.logger.log_level import LogLevel from hiero_sdk_python.response_code import ResponseCode from hiero_sdk_python.tokens.supply_type import SupplyType -from hiero_sdk_python.tokens.token_create_transaction import TokenCreateTransaction, TokenKeys, TokenParams -from hiero_sdk_python.tokens.token_associate_transaction import TokenAssociateTransaction +from hiero_sdk_python.tokens.token_create_transaction import ( + TokenCreateTransaction, + TokenKeys, + TokenParams, +) +from hiero_sdk_python.tokens.token_associate_transaction import ( + TokenAssociateTransaction, +) from hiero_sdk_python.account.account_create_transaction import AccountCreateTransaction from hiero_sdk_python.transaction.transfer_transaction import TransferTransaction -from hiero_sdk_python.hbar import Hbar +from hiero_sdk_python.hbar import Hbar + +T = TypeVar("T") load_dotenv(override=True) + @fixture def env(): """Integration test environment with client/operator set up.""" @@ -26,28 +36,37 @@ def env(): 
yield e e.close() + @dataclass class Account: - id: AccountId - key: PrivateKey + id: AccountId + key: PrivateKey + class IntegrationTestEnv: def __init__(self) -> None: - network = Network(os.getenv('NETWORK')) + network_name = os.getenv("NETWORK", "solo").lower() + + if network_name == "mainnet": + raise ValueError("Running tests against mainnet is not allowed") + + network = Network(network=network_name) self.client = Client(network) self.operator_id: Optional[AccountId] = None self.operator_key: Optional[PrivateKey] = None - operator_id = os.getenv('OPERATOR_ID') - operator_key = os.getenv('OPERATOR_KEY') + operator_id = os.getenv("OPERATOR_ID") + operator_key = os.getenv("OPERATOR_KEY") if operator_id and operator_key: self.operator_id = AccountId.from_string(operator_id) self.operator_key = PrivateKey.from_string(operator_key) self.client.set_operator(self.operator_id, self.operator_key) + else: + raise ValueError("OPERATOR_ID and OPERATOR_KEY must be set for integration tests") self.client.logger.set_level(LogLevel.ERROR) self.public_operator_key = self.operator_key.public_key() - + def close(self): self.client.close() @@ -56,8 +75,8 @@ def create_account(self, initial_hbar: float = 1.0) -> Account: key = PrivateKey.generate() tx = ( AccountCreateTransaction() - .set_key_without_alias(key.public_key()) - .set_initial_balance(Hbar(initial_hbar)) + .set_key_without_alias(key.public_key()) + .set_initial_balance(Hbar(initial_hbar)) ) receipt = tx.execute(self.client) if receipt.status != ResponseCode.SUCCESS: @@ -66,18 +85,20 @@ def create_account(self, initial_hbar: float = 1.0) -> Account: ) return Account(id=receipt.account_id, key=key) - def associate_and_transfer(self, receiver: AccountId, receiver_key: PrivateKey, token_id, amount: int): + def associate_and_transfer( + self, receiver: AccountId, receiver_key: PrivateKey, token_id, amount: int + ): """ Associate the token with `receiver`, then transfer `amount` of the token from the operator to that 
receiver. """ assoc_receipt = ( TokenAssociateTransaction() - .set_account_id(receiver) - .add_token_id(token_id) - .freeze_with(self.client) - .sign(receiver_key) - .execute(self.client) + .set_account_id(receiver) + .add_token_id(token_id) + .freeze_with(self.client) + .sign(receiver_key) + .execute(self.client) ) if assoc_receipt.status != ResponseCode.SUCCESS: raise AssertionError( @@ -86,15 +107,16 @@ def associate_and_transfer(self, receiver: AccountId, receiver_key: PrivateKey, transfer_receipt = ( TransferTransaction() - .add_token_transfer(token_id, self.operator_id, -amount) - .add_token_transfer(token_id, receiver, amount) - .execute(self.client) # auto-signs with operator’s key + .add_token_transfer(token_id, self.operator_id, -amount) + .add_token_transfer(token_id, receiver, amount) + .execute(self.client) # auto-signs with operator’s key ) if transfer_receipt.status != ResponseCode.SUCCESS: raise AssertionError( f"Transfer failed: {ResponseCode(transfer_receipt.status).name}" ) + def create_fungible_token(env, opts=[]): """ Create a fungible token with the given options. 
@@ -106,36 +128,39 @@ def create_fungible_token(env, opts=[]): lambda tx: tx.set_treasury_account_id(custom_treasury_id).freeze_with(client) """ token_params = TokenParams( - token_name="PTokenTest34", - token_symbol="PTT34", - decimals=2, - initial_supply=1000, - treasury_account_id=env.operator_id, - token_type=TokenType.FUNGIBLE_COMMON, - supply_type=SupplyType.FINITE, - max_supply=10000 - ) - + token_name="PTokenTest34", + token_symbol="PTT34", + decimals=2, + initial_supply=1000, + treasury_account_id=env.operator_id, + token_type=TokenType.FUNGIBLE_COMMON, + supply_type=SupplyType.FINITE, + max_supply=10000, + ) + token_keys = TokenKeys( - admin_key=env.operator_key, - supply_key=env.operator_key, - freeze_key=env.operator_key, - wipe_key=env.operator_key - # pause_key= None # implicitly “no pause key” use opts to add one - ) - + admin_key=env.operator_key, + supply_key=env.operator_key, + freeze_key=env.operator_key, + wipe_key=env.operator_key, + # pause_key= None # implicitly “no pause key” use opts to add one + ) + token_transaction = TokenCreateTransaction(token_params, token_keys) - + # Apply optional functions to the token creation transaction for opt in opts: opt(token_transaction) - + token_receipt = token_transaction.execute(env.client) - - assert token_receipt.status == ResponseCode.SUCCESS, f"Token creation failed with status: {ResponseCode(token_receipt.status).name}" - + + assert ( + token_receipt.status == ResponseCode.SUCCESS + ), f"Token creation failed with status: {ResponseCode(token_receipt.status).name}" + return token_receipt.token_id + def create_nft_token(env, opts=[]): """ Create a non-fungible token (NFT) with the given options. 
@@ -154,15 +179,14 @@ def create_nft_token(env, opts=[]): treasury_account_id=env.operator_id, token_type=TokenType.NON_FUNGIBLE_UNIQUE, supply_type=SupplyType.FINITE, - max_supply=10000 + max_supply=10000, ) - + token_keys = TokenKeys( admin_key=env.operator_key, supply_key=env.operator_key, - freeze_key=env.operator_key + freeze_key=env.operator_key, # pause_key= None # implicitly “no pause key” use opts to add one - ) transaction = TokenCreateTransaction(token_params, token_keys) @@ -172,7 +196,51 @@ def create_nft_token(env, opts=[]): opt(transaction) token_receipt = transaction.execute(env.client) - - assert token_receipt.status == ResponseCode.SUCCESS, f"Token creation failed with status: {ResponseCode(token_receipt.status).name}" - - return token_receipt.token_id \ No newline at end of file + + assert ( + token_receipt.status == ResponseCode.SUCCESS + ), f"Token creation failed with status: {ResponseCode(token_receipt.status).name}" + + return token_receipt.token_id + + +def wait_for_mirror_node( + fn: Callable[[], T], + predicate: Callable[[T], bool], + timeout: float = 5, + interval: float = 1, +) -> T: + """ + Polls fn until predicate(result) returns True or timeout is reached + + Args: + fn: Function that fetches data from mirror node. + predicate: Condition that determines success. + timeout: Max time to wait (seconds). + interval: Sleep interval between retries (seconds). + + Returns: + T: The successful result. + """ + deadline = time.monotonic() + timeout + last_response = None + last_exception = None + + while time.monotonic() < deadline: + try: + last_response = fn() + if predicate(last_response): + return last_response + except Exception as e: + last_exception = e + + time.sleep(interval) + + if last_exception is not None: + raise TimeoutError( + "Timed out waiting for mirror node, Last call raised an exception" + ) from last_exception + + raise TimeoutError( + f"Timed out waiting for mirror node. 
Last response: {last_response}" + ) diff --git a/tests/unit/account_allowance_approve_transaction_test.py b/tests/unit/account_allowance_approve_transaction_test.py index e4b9b8391..67a555288 100644 --- a/tests/unit/account_allowance_approve_transaction_test.py +++ b/tests/unit/account_allowance_approve_transaction_test.py @@ -48,7 +48,9 @@ def test_account_allowance_transaction_initialization(account_allowance_transact assert account_allowance_transaction.hbar_allowances == [] assert account_allowance_transaction.token_allowances == [] assert account_allowance_transaction.nft_allowances == [] - assert account_allowance_transaction._default_transaction_fee == Hbar(1).to_tinybars() + assert ( + account_allowance_transaction._default_transaction_fee == Hbar(1).to_tinybars() + ) def test_approve_hbar_allowance(account_allowance_transaction, sample_accounts): @@ -57,7 +59,9 @@ def test_approve_hbar_allowance(account_allowance_transaction, sample_accounts): spender = sample_accounts["spender"] amount = Hbar(100) - result = account_allowance_transaction.approve_hbar_allowance(owner, spender, amount) + result = account_allowance_transaction.approve_hbar_allowance( + owner, spender, amount + ) assert result is account_allowance_transaction assert len(account_allowance_transaction.hbar_allowances) == 1 @@ -68,7 +72,9 @@ def test_approve_hbar_allowance(account_allowance_transaction, sample_accounts): assert allowance.amount == amount.to_tinybars() -def test_approve_hbar_allowance_multiple(account_allowance_transaction, sample_accounts): +def test_approve_hbar_allowance_multiple( + account_allowance_transaction, sample_accounts +): """Test approving multiple HBAR allowances""" owner = sample_accounts["owner"] spender1 = sample_accounts["spender"] @@ -78,18 +84,28 @@ def test_approve_hbar_allowance_multiple(account_allowance_transaction, sample_a account_allowance_transaction.approve_hbar_allowance(owner, spender2, Hbar(200)) assert 
len(account_allowance_transaction.hbar_allowances) == 2 - assert account_allowance_transaction.hbar_allowances[0].amount == Hbar(100).to_tinybars() - assert account_allowance_transaction.hbar_allowances[1].amount == Hbar(200).to_tinybars() + assert ( + account_allowance_transaction.hbar_allowances[0].amount + == Hbar(100).to_tinybars() + ) + assert ( + account_allowance_transaction.hbar_allowances[1].amount + == Hbar(200).to_tinybars() + ) -def test_approve_token_allowance(account_allowance_transaction, sample_accounts, sample_tokens): +def test_approve_token_allowance( + account_allowance_transaction, sample_accounts, sample_tokens +): """Test approving token allowance""" token_id = sample_tokens["token1"] owner = sample_accounts["owner"] spender = sample_accounts["spender"] amount = 1000 - result = account_allowance_transaction.approve_token_allowance(token_id, owner, spender, amount) + result = account_allowance_transaction.approve_token_allowance( + token_id, owner, spender, amount + ) assert result is account_allowance_transaction assert len(account_allowance_transaction.token_allowances) == 1 @@ -118,14 +134,18 @@ def test_approve_token_allowance_multiple( assert account_allowance_transaction.token_allowances[1].amount == 2000 -def test_approve_token_nft_allowance(account_allowance_transaction, sample_accounts, sample_tokens): +def test_approve_token_nft_allowance( + account_allowance_transaction, sample_accounts, sample_tokens +): """Test approving NFT allowance""" token_id = sample_tokens["token1"] nft_id = NftId(token_id, 1) owner = sample_accounts["owner"] spender = sample_accounts["spender"] - result = account_allowance_transaction.approve_token_nft_allowance(nft_id, owner, spender) + result = account_allowance_transaction.approve_token_nft_allowance( + nft_id, owner, spender + ) assert result is account_allowance_transaction assert len(account_allowance_transaction.nft_allowances) == 1 @@ -195,7 +215,9 @@ def 
test_approve_token_nft_allowance_all_serials_existing( account_allowance_transaction.approve_token_nft_allowance(nft_id, owner, spender) # Then approve all serials - account_allowance_transaction.approve_token_nft_allowance_all_serials(token_id, owner, spender) + account_allowance_transaction.approve_token_nft_allowance_all_serials( + token_id, owner, spender + ) assert len(account_allowance_transaction.nft_allowances) == 1 allowance = account_allowance_transaction.nft_allowances[0] @@ -226,7 +248,9 @@ def test_delete_token_nft_allowance_all_serials( assert allowance.approved_for_all is False -def test_add_all_token_nft_approval(account_allowance_transaction, sample_accounts, sample_tokens): +def test_add_all_token_nft_approval( + account_allowance_transaction, sample_accounts, sample_tokens +): """Test adding all token NFT approval""" token_id = sample_tokens["token1"] spender = sample_accounts["spender"] @@ -265,7 +289,9 @@ def test_build_proto_body_with_allowances( # Add all types of allowances account_allowance_transaction.approve_hbar_allowance(owner, spender, Hbar(100)) - account_allowance_transaction.approve_token_allowance(token_id, owner, spender, 1000) + account_allowance_transaction.approve_token_allowance( + token_id, owner, spender, 1000 + ) account_allowance_transaction.approve_token_nft_allowance(nft_id, owner, spender) proto_body = account_allowance_transaction._build_proto_body() @@ -325,7 +351,9 @@ def test_require_not_frozen(account_allowance_transaction, sample_accounts): ) -def test_mixed_allowance_types(account_allowance_transaction, sample_accounts, sample_tokens): +def test_mixed_allowance_types( + account_allowance_transaction, sample_accounts, sample_tokens +): """Test transaction with mixed allowance types""" owner = sample_accounts["owner"] spender = sample_accounts["spender"] @@ -334,7 +362,9 @@ def test_mixed_allowance_types(account_allowance_transaction, sample_accounts, s # Add all types of allowances 
account_allowance_transaction.approve_hbar_allowance(owner, spender, Hbar(100)) - account_allowance_transaction.approve_token_allowance(token_id, owner, spender, 1000) + account_allowance_transaction.approve_token_allowance( + token_id, owner, spender, 1000 + ) account_allowance_transaction.approve_token_nft_allowance(nft_id, owner, spender) account_allowance_transaction.approve_token_nft_allowance_all_serials( sample_tokens["token2"], owner, spender @@ -352,7 +382,9 @@ def test_mixed_allowance_types(account_allowance_transaction, sample_accounts, s assert len(proto_body.nftAllowances) == 2 -def test_zero_amount_allowances(account_allowance_transaction, sample_accounts, sample_tokens): +def test_zero_amount_allowances( + account_allowance_transaction, sample_accounts, sample_tokens +): """Test allowances with zero amounts (for removal)""" owner = sample_accounts["owner"] spender = sample_accounts["spender"] diff --git a/tests/unit/account_allowance_delete_transaction_test.py b/tests/unit/account_allowance_delete_transaction_test.py index f21fb43c9..ce9e03dc2 100644 --- a/tests/unit/account_allowance_delete_transaction_test.py +++ b/tests/unit/account_allowance_delete_transaction_test.py @@ -44,10 +44,15 @@ def sample_tokens(): } -def test_account_allowance_delete_transaction_initialization(account_allowance_delete_transaction): +def test_account_allowance_delete_transaction_initialization( + account_allowance_delete_transaction, +): """Test the initialization of the AccountAllowanceDeleteTransaction class""" assert account_allowance_delete_transaction.nft_wipe == [] - assert account_allowance_delete_transaction._default_transaction_fee == Hbar(1).to_tinybars() + assert ( + account_allowance_delete_transaction._default_transaction_fee + == Hbar(1).to_tinybars() + ) def test_account_allowance_delete_transaction_initialization_with_allowances( @@ -58,7 +63,9 @@ def test_account_allowance_delete_transaction_initialization_with_allowances( token_id = 
sample_tokens["token1"] nft_id = NftId(token_id, 1) - nft_wipe = [TokenNftAllowance(token_id=token_id, owner_account_id=owner, serial_numbers=[1])] + nft_wipe = [ + TokenNftAllowance(token_id=token_id, owner_account_id=owner, serial_numbers=[1]) + ] tx = AccountAllowanceDeleteTransaction(nft_wipe=nft_wipe) @@ -74,7 +81,9 @@ def test_delete_all_token_nft_allowances( nft_id = NftId(token_id, 1) owner = sample_accounts["owner"] - result = account_allowance_delete_transaction.delete_all_token_nft_allowances(nft_id, owner) + result = account_allowance_delete_transaction.delete_all_token_nft_allowances( + nft_id, owner + ) assert result is account_allowance_delete_transaction assert len(account_allowance_delete_transaction.nft_wipe) == 1 @@ -116,11 +125,15 @@ def test_delete_all_token_nft_allowances_different_owners( # Add NFT for first owner nft_id1 = NftId(token_id, 1) - account_allowance_delete_transaction.delete_all_token_nft_allowances(nft_id1, owner1) + account_allowance_delete_transaction.delete_all_token_nft_allowances( + nft_id1, owner1 + ) # Add NFT for second owner nft_id2 = NftId(token_id, 2) - account_allowance_delete_transaction.delete_all_token_nft_allowances(nft_id2, owner2) + account_allowance_delete_transaction.delete_all_token_nft_allowances( + nft_id2, owner2 + ) assert len(account_allowance_delete_transaction.nft_wipe) == 2 assert account_allowance_delete_transaction.nft_wipe[0].owner_account_id == owner1 @@ -190,7 +203,9 @@ def test_build_transaction_body(account_allowance_delete_transaction, sample_acc assert len(proto_body.nftAllowances) == 1 -def test_build_scheduled_body(account_allowance_delete_transaction, sample_accounts, sample_tokens): +def test_build_scheduled_body( + account_allowance_delete_transaction, sample_accounts, sample_tokens +): """Test building scheduled transaction body""" owner = sample_accounts["owner"] token_id = sample_tokens["token1"] @@ -203,7 +218,9 @@ def test_build_scheduled_body(account_allowance_delete_transaction, 
sample_accou assert scheduled_body.cryptoDeleteAllowance is not None -def test_require_not_frozen(account_allowance_delete_transaction, sample_accounts, sample_tokens): +def test_require_not_frozen( + account_allowance_delete_transaction, sample_accounts, sample_tokens +): """Test that methods require transaction not to be frozen""" owner = sample_accounts["owner"] token_id = sample_tokens["token1"] @@ -214,7 +231,9 @@ def test_require_not_frozen(account_allowance_delete_transaction, sample_account # This should raise an error with pytest.raises(Exception, match="Transaction is immutable"): - account_allowance_delete_transaction.delete_all_token_nft_allowances(nft_id, owner) + account_allowance_delete_transaction.delete_all_token_nft_allowances( + nft_id, owner + ) def test_duplicate_serial_number_handling( @@ -234,7 +253,9 @@ def test_duplicate_serial_number_handling( assert wipe_entry.serial_numbers == [1] # Should not have duplicates -def test_mixed_nft_deletions(account_allowance_delete_transaction, sample_accounts, sample_tokens): +def test_mixed_nft_deletions( + account_allowance_delete_transaction, sample_accounts, sample_tokens +): """Test transaction with mixed NFT deletions""" owner1 = sample_accounts["owner"] owner2 = sample_accounts["owner2"] @@ -242,12 +263,20 @@ def test_mixed_nft_deletions(account_allowance_delete_transaction, sample_accoun token2 = sample_tokens["token2"] # Add various NFT deletions - account_allowance_delete_transaction.delete_all_token_nft_allowances(NftId(token1, 1), owner1) - account_allowance_delete_transaction.delete_all_token_nft_allowances(NftId(token1, 2), owner1) - account_allowance_delete_transaction.delete_all_token_nft_allowances(NftId(token2, 1), owner2) + account_allowance_delete_transaction.delete_all_token_nft_allowances( + NftId(token1, 1), owner1 + ) + account_allowance_delete_transaction.delete_all_token_nft_allowances( + NftId(token1, 2), owner1 + ) + 
account_allowance_delete_transaction.delete_all_token_nft_allowances( + NftId(token2, 1), owner2 + ) # Verify all deletions are present - assert len(account_allowance_delete_transaction.nft_wipe) == 2 # Grouped by token+owner + assert ( + len(account_allowance_delete_transaction.nft_wipe) == 2 + ) # Grouped by token+owner # Verify protobuf body includes all deletions proto_body = account_allowance_delete_transaction._build_proto_body() diff --git a/tests/unit/account_balance_query_test.py b/tests/unit/account_balance_query_test.py index a74746836..f989cbea4 100644 --- a/tests/unit/account_balance_query_test.py +++ b/tests/unit/account_balance_query_test.py @@ -68,9 +68,9 @@ def test_set_account_id_returns_self_for_chaining(): """set_account_id should return self to enable method chaining.""" query = CryptoGetAccountBalanceQuery() account_id = AccountId(0, 0, 1800) - + result = query.set_account_id(account_id) - + assert result is query assert isinstance(result, CryptoGetAccountBalanceQuery) @@ -79,9 +79,9 @@ def test_set_contract_id_returns_self_for_chaining(): """set_contract_id should return self to enable method chaining.""" query = CryptoGetAccountBalanceQuery() contract_id = ContractId(0, 0, 1234) - + result = query.set_contract_id(contract_id) - + assert result is query assert isinstance(result, CryptoGetAccountBalanceQuery) @@ -107,7 +107,7 @@ def test_build_account_balance_query_with_contract_id(): assert query.contract_id == contract_id assert query.account_id is None assert isinstance(query.contract_id, ContractId) - assert hasattr(query, 'contract_id') + assert hasattr(query, "contract_id") def test_set_contract_id_method_chaining_resets_account_id(mock_account_ids): @@ -165,6 +165,7 @@ def test_make_request_populates_contract_id_only(): # accountID should be unset assert not balance_query.HasField("accountID") + def test_make_request_populates_account_id_only(mock_account_ids): """_make_request should populate accountID when only account_id is set.""" 
account_id_sender, *_ = mock_account_ids diff --git a/tests/unit/account_create_transaction_test.py b/tests/unit/account_create_transaction_test.py index 235991592..1c894d9a7 100644 --- a/tests/unit/account_create_transaction_test.py +++ b/tests/unit/account_create_transaction_test.py @@ -8,9 +8,16 @@ from hiero_sdk_python.response_code import ResponseCode from hiero_sdk_python.hapi.services import timestamp_pb2 from hiero_sdk_python.hapi.services import basic_types_pb2, response_pb2 -from hiero_sdk_python.hapi.services.transaction_response_pb2 import TransactionResponse as TransactionResponseProto -from hiero_sdk_python.hapi.services.transaction_receipt_pb2 import TransactionReceipt as TransactionReceiptProto -from hiero_sdk_python.hapi.services import transaction_get_receipt_pb2, response_header_pb2 +from hiero_sdk_python.hapi.services.transaction_response_pb2 import ( + TransactionResponse as TransactionResponseProto, +) +from hiero_sdk_python.hapi.services.transaction_receipt_pb2 import ( + TransactionReceipt as TransactionReceiptProto, +) +from hiero_sdk_python.hapi.services import ( + transaction_get_receipt_pb2, + response_header_pb2, +) from hiero_sdk_python.hapi.services.schedulable_transaction_body_pb2 import ( SchedulableTransactionBody, ) @@ -18,20 +25,21 @@ pytestmark = pytest.mark.unit + def generate_transaction_id(account_id_proto): """Generate a unique transaction ID based on the account ID and the current timestamp.""" current_time = time.time() timestamp_seconds = int(current_time) timestamp_nanos = int((current_time - timestamp_seconds) * 1e9) - tx_timestamp = timestamp_pb2.Timestamp(seconds=timestamp_seconds, nanos=timestamp_nanos) - - tx_id = TransactionId( - valid_start=tx_timestamp, - account_id=account_id_proto + tx_timestamp = timestamp_pb2.Timestamp( + seconds=timestamp_seconds, nanos=timestamp_nanos ) + + tx_id = TransactionId(valid_start=tx_timestamp, account_id=account_id_proto) return tx_id + # This test uses fixture mock_account_ids 
as parameter def test_account_create_transaction_build(mock_account_ids): """Test building an account create transaction body with valid parameters.""" @@ -57,6 +65,7 @@ def test_account_create_transaction_build(mock_account_ids): assert transaction_body.cryptoCreateAccount.initialBalance == 100000000 assert transaction_body.cryptoCreateAccount.memo == "Test account" + # This test uses fixture mock_account_ids as parameter def test_account_create_transaction_build_scheduled_body(mock_account_ids): """Test building a schedulable account create transaction body.""" @@ -91,6 +100,7 @@ def test_account_create_transaction_build_scheduled_body(mock_account_ids): assert schedulable_body.cryptoCreateAccount.memo == "Schedulable account" assert schedulable_body.cryptoCreateAccount.receiverSigRequired == True + # This test uses fixture (mock_account_ids, mock_client) as parameter def test_account_create_transaction_sign(mock_account_ids, mock_client): """Test signing the account create transaction.""" @@ -113,56 +123,58 @@ def test_account_create_transaction_sign(mock_account_ids, mock_client): # Add first signiture account_tx.sign(mock_client.operator_private_key) body_bytes = account_tx._transaction_body_bytes[node_account_id] - - assert body_bytes in account_tx._signature_map, "Body bytes should be a key in the signature map dictionary" - assert len(account_tx._signature_map[body_bytes].sigPair) == 1, \ - "Transaction should have exactly one signature" - + + assert ( + body_bytes in account_tx._signature_map + ), "Body bytes should be a key in the signature map dictionary" + assert ( + len(account_tx._signature_map[body_bytes].sigPair) == 1 + ), "Transaction should have exactly one signature" + # Add second signiture account_tx.sign(operator_private_key) body_bytes = account_tx._transaction_body_bytes[node_account_id] - - assert body_bytes in account_tx._signature_map, "Body bytes should be a key in the signature map dictionary" - assert 
len(account_tx._signature_map[body_bytes].sigPair) == 2, \ - "Transaction should have exactly two signatures" + + assert ( + body_bytes in account_tx._signature_map + ), "Body bytes should be a key in the signature map dictionary" + assert ( + len(account_tx._signature_map[body_bytes].sigPair) == 2 + ), "Transaction should have exactly two signatures" + def test_account_create_transaction(): """Integration test for AccountCreateTransaction with retry and response handling.""" # Create test transaction responses busy_response = TransactionResponseProto() busy_response.nodeTransactionPrecheckCode = ResponseCode.BUSY - + ok_response = TransactionResponseProto() ok_response.nodeTransactionPrecheckCode = ResponseCode.OK - + # Create a mock receipt for a successful account creation mock_receipt_proto = TransactionReceiptProto( status=ResponseCode.SUCCESS, - accountID=basic_types_pb2.AccountID( - shardNum=0, - realmNum=0, - accountNum=1234 - ) + accountID=basic_types_pb2.AccountID(shardNum=0, realmNum=0, accountNum=1234), ) - + # Create a response for the receipt query receipt_query_response = response_pb2.Response( transactionGetReceipt=transaction_get_receipt_pb2.TransactionGetReceiptResponse( header=response_header_pb2.ResponseHeader( nodeTransactionPrecheckCode=ResponseCode.OK ), - receipt=mock_receipt_proto + receipt=mock_receipt_proto, ) ) - + response_sequences = [ [ok_response, receipt_query_response], ] - + # Use the context manager to set up and tear down the mock environment - with mock_hedera_servers(response_sequences) as client, \ - patch('time.sleep'): - + with mock_hedera_servers(response_sequences) as client, patch("time.sleep"): + # Create the transaction new_key = PrivateKey.generate() transaction = ( @@ -170,21 +182,26 @@ def test_account_create_transaction(): .set_key_without_alias(new_key.public_key()) .set_initial_balance(100000000) # 1 HBAR ) - + # Execute the transaction and get receipt receipt = transaction.execute(client) - + # Verify the 
results - assert receipt.status == ResponseCode.SUCCESS, "Transaction should have succeeded" - assert receipt.account_id.num == 1234, "Should have created account with ID 1234" + assert ( + receipt.status == ResponseCode.SUCCESS + ), "Transaction should have succeeded" + assert ( + receipt.account_id.num == 1234 + ), "Should have created account with ID 1234" + def test_sign_account_create_without_freezing_raises_error(mock_account_ids): """Test that signing a transaction without freezing it first raises an error.""" operator_id, node_account_id = mock_account_ids - + new_private_key = PrivateKey.generate() new_public_key = new_private_key.public_key() - + account_tx = ( AccountCreateTransaction() .set_key_without_alias(new_public_key) @@ -197,6 +214,7 @@ def test_sign_account_create_without_freezing_raises_error(mock_account_ids): with pytest.raises(Exception, match="Transaction is not frozen"): account_tx.sign(new_private_key) + @pytest.fixture def mock_account_ids(): """Fixture to provide mock account IDs for testing.""" @@ -204,29 +222,31 @@ def mock_account_ids(): node_account_id = AccountId(0, 0, 3) return operator_account_id, node_account_id + def test_set_max_automatic_token_associations_validation(): """Test validation for max_automatic_token_associations.""" tx = AccountCreateTransaction() - + # Test good value: -1 for unlimited tx.set_max_automatic_token_associations(-1) assert tx.max_automatic_token_associations == -1 - + # Test good value: 0 for default tx.set_max_automatic_token_associations(0) assert tx.max_automatic_token_associations == 0 - + # Test good value: 100 tx.set_max_automatic_token_associations(100) assert tx.max_automatic_token_associations == 100 - + # Test bad value: -2 with pytest.raises(ValueError) as e: tx.set_max_automatic_token_associations(-2) - + # Check for the new error message assert "must be -1 (unlimited) or a non-negative integer" in str(e.value) + def test_account_create_build_with_max_auto_assoc(mock_account_ids): 
"""Test building transaction with max_automatic_token_associations.""" operator_id, node_account_id = mock_account_ids @@ -235,24 +255,22 @@ def test_account_create_build_with_max_auto_assoc(mock_account_ids): account_tx = ( AccountCreateTransaction() .set_key_without_alias(new_public_key) - .set_max_automatic_token_associations(-1) # Test the new value + .set_max_automatic_token_associations(-1) # Test the new value ) account_tx.transaction_id = generate_transaction_id(operator_id) account_tx.node_account_id = node_account_id - + body = account_tx.build_transaction_body() - + # Verify the value is correctly set in the protobuf body assert body.cryptoCreateAccount.max_automatic_token_associations == -1 + def test_create_account_transaction_without_alias(mock_account_ids): """Test Create account transaction using set_key_without_alias method""" operator_id, node_id = mock_account_ids public_key = PrivateKey.generate().public_key() - tx = ( - AccountCreateTransaction() - .set_key_without_alias(public_key) - ) + tx = AccountCreateTransaction().set_key_without_alias(public_key) assert tx.key == public_key assert tx.alias is None @@ -262,17 +280,15 @@ def test_create_account_transaction_without_alias(mock_account_ids): tx_body = tx.build_transaction_body() assert tx_body.cryptoCreateAccount.key == public_key._to_proto() - assert tx_body.cryptoCreateAccount.alias == b'' + assert tx_body.cryptoCreateAccount.alias == b"" + def test_create_account_transaction_set_key_with_alias(mock_account_ids): """Test Create account transaction using set_key_with_alias method""" operator_id, node_id = mock_account_ids public_key = PrivateKey.generate_ecdsa().public_key() - tx = ( - AccountCreateTransaction() - .set_key_with_alias(public_key) - ) + tx = AccountCreateTransaction().set_key_with_alias(public_key) assert tx.key == public_key assert tx.alias == public_key.to_evm_address() @@ -282,18 +298,20 @@ def test_create_account_transaction_set_key_with_alias(mock_account_ids): tx_body = 
tx.build_transaction_body() assert tx_body.cryptoCreateAccount.key == public_key._to_proto() - assert tx_body.cryptoCreateAccount.alias == public_key.to_evm_address().address_bytes + assert ( + tx_body.cryptoCreateAccount.alias == public_key.to_evm_address().address_bytes + ) -def test_create_account_transaction_set_key_with_seperate_key_for_alias(mock_account_ids): + +def test_create_account_transaction_set_key_with_seperate_key_for_alias( + mock_account_ids, +): """Test Create account transaction using set_key_with_alias method with seprate key""" operator_id, node_id = mock_account_ids public_key = PrivateKey.generate().public_key() alias_key = PrivateKey.generate_ecdsa().public_key() - tx = ( - AccountCreateTransaction() - .set_key_with_alias(public_key, alias_key) - ) + tx = AccountCreateTransaction().set_key_with_alias(public_key, alias_key) assert tx.key == public_key assert tx.alias == alias_key.to_evm_address() @@ -305,23 +323,19 @@ def test_create_account_transaction_set_key_with_seperate_key_for_alias(mock_acc assert tx_body.cryptoCreateAccount.key == public_key._to_proto() assert tx_body.cryptoCreateAccount.alias == alias_key.to_evm_address().address_bytes + def test_create_account_transaction_set_key_with_alias_non_ecdsa_key(): """Test Create account transaction using set_key_with_alias method""" public_key = PrivateKey.generate().public_key() with pytest.raises(ValueError): - ( - AccountCreateTransaction() - .set_key_with_alias(public_key) - ) - + (AccountCreateTransaction().set_key_with_alias(public_key)) + # With seperate key for deriving alias alias_key = PrivateKey.generate().public_key() with pytest.raises(ValueError): - ( - AccountCreateTransaction() - .set_key_with_alias(public_key, alias_key) - ) + (AccountCreateTransaction().set_key_with_alias(public_key, alias_key)) + def test_create_account_transaction_with_set_alias(mock_account_ids): """Test account creation transaction using a valid EvmAddress object.""" @@ -345,8 +359,11 @@ def 
test_create_account_transaction_with_set_alias(mock_account_ids): assert tx_body.cryptoCreateAccount.key == public_key._to_proto() assert tx_body.cryptoCreateAccount.alias == evm_address.address_bytes + @pytest.mark.parametrize("with_prefix", [False, True]) -def test_create_account_transaction_with_set_alias_from_string(mock_account_ids, with_prefix): +def test_create_account_transaction_with_set_alias_from_string( + mock_account_ids, with_prefix +): """Test account creation transaction using alias from string (with and without '0x' prefix).""" operator_id, node_id = mock_account_ids public_key = PrivateKey.generate().public_key() @@ -371,15 +388,19 @@ def test_create_account_transaction_with_set_alias_from_string(mock_account_ids, assert tx_body.cryptoCreateAccount.key == public_key._to_proto() assert tx_body.cryptoCreateAccount.alias == evm_address.address_bytes -@pytest.mark.parametrize("invalid_str", [ - "", - "0x", - "12345", - "0x12345", - "0x" + "g" * 40, - "0x" + "a" * 39, - "0x" + "a" * 41, -]) + +@pytest.mark.parametrize( + "invalid_str", + [ + "", + "0x", + "12345", + "0x12345", + "0x" + "g" * 40, + "0x" + "a" * 39, + "0x" + "a" * 41, + ], +) def test_create_account_transaction_with_set_alias_from_invalid_string(invalid_str): """Test invalid alias strings raise ValueError.""" public_key = PrivateKey.generate().public_key() @@ -388,6 +409,7 @@ def test_create_account_transaction_with_set_alias_from_invalid_string(invalid_s with pytest.raises(ValueError): tx.set_alias(invalid_str) + def test_create_account_transaction_with_set_alias_from_invalid_type(): """Test alias with invalid type raises TypeError.""" public_key = PrivateKey.generate().public_key() @@ -396,6 +418,7 @@ def test_create_account_transaction_with_set_alias_from_invalid_type(): with pytest.raises(TypeError): tx.set_alias(1234) + # This test uses fixture mock_account_ids as parameter def test_account_create_transaction_build_with_private_key(mock_account_ids): """AccountCreateTransaction 
should accept also PrivateKey and serialize the PublicKey in the proto.""" @@ -421,6 +444,7 @@ def test_account_create_transaction_build_with_private_key(mock_account_ids): assert body.cryptoCreateAccount.initialBalance == 100000000 assert body.cryptoCreateAccount.memo == "Account with private key" + # This test uses fixture mock_account_ids as parameter def test_create_account_transaction_set_key_with_alias_private_keys(mock_account_ids): """set_key_with_alias should work also with PrivateKey ECDSA (account key + alias key).""" @@ -435,9 +459,8 @@ def test_create_account_transaction_set_key_with_alias_private_keys(mock_account alias_public_key = alias_private_key.public_key() expected_evm_address = alias_public_key.to_evm_address() - tx = ( - AccountCreateTransaction() - .set_key_with_alias(account_private_key, alias_private_key) + tx = AccountCreateTransaction().set_key_with_alias( + account_private_key, alias_private_key ) assert tx.key == account_private_key @@ -452,8 +475,11 @@ def test_create_account_transaction_set_key_with_alias_private_keys(mock_account # Alias should be the address bytes from the key for the alias assert tx_body.cryptoCreateAccount.alias == expected_evm_address.address_bytes + # This test uses fixture mock_account_ids as parameter -def test_create_account_transaction_set_key_with_alias_private_key_without_ecdsa_key(mock_account_ids): +def test_create_account_transaction_set_key_with_alias_private_key_without_ecdsa_key( + mock_account_ids, +): """set_key_with_alias should work also without ecdsa_key.""" operator_id, node_id = mock_account_ids @@ -463,10 +489,7 @@ def test_create_account_transaction_set_key_with_alias_private_key_without_ecdsa expected_evm_address = account_public_key.to_evm_address() - tx = ( - AccountCreateTransaction() - .set_key_with_alias(account_private_key) - ) + tx = AccountCreateTransaction().set_key_with_alias(account_private_key) assert tx.key == account_private_key assert tx.alias == expected_evm_address @@ 
-480,6 +503,7 @@ def test_create_account_transaction_set_key_with_alias_private_key_without_ecdsa # Alias should be the address bytes from the key for the alias assert tx_body.cryptoCreateAccount.alias == expected_evm_address.address_bytes + def test_set_key_without_alias_then_with_alias_overrides_alias(): """set_key_with_alias should ovveride set_key_without_alias""" # Account key(ECDSA) @@ -491,10 +515,7 @@ def test_set_key_without_alias_then_with_alias_overrides_alias(): alias_public_key = alias_private_key.public_key() expected_evm_address = alias_public_key.to_evm_address() - tx = ( - AccountCreateTransaction() - .set_key_without_alias(account_private_key) - ) + tx = AccountCreateTransaction().set_key_without_alias(account_private_key) assert tx.alias is None @@ -502,6 +523,7 @@ def test_set_key_without_alias_then_with_alias_overrides_alias(): assert tx.alias == expected_evm_address + def test_set_key_with_alias_then_without_alias_overrides_alias(): """set_key_without_alias should ovveride set_key_with_alias""" # Account key(ECDSA) @@ -513,9 +535,8 @@ def test_set_key_with_alias_then_without_alias_overrides_alias(): alias_public_key = alias_private_key.public_key() expected_evm_address = alias_public_key.to_evm_address() - tx = ( - AccountCreateTransaction() - .set_key_with_alias(account_private_key, alias_private_key) + tx = AccountCreateTransaction().set_key_with_alias( + account_private_key, alias_private_key ) assert tx.alias == expected_evm_address diff --git a/tests/unit/account_id_test.py b/tests/unit/account_id_test.py index 9cf0b1bf5..d29da2ea9 100644 --- a/tests/unit/account_id_test.py +++ b/tests/unit/account_id_test.py @@ -2,9 +2,11 @@ Unit tests for the AccountId class. 
""" +from unittest.mock import MagicMock, patch import pytest from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.crypto.evm_address import EvmAddress from hiero_sdk_python.crypto.private_key import PrivateKey from hiero_sdk_python.hapi.services import basic_types_pb2 @@ -29,21 +31,30 @@ def alias_key_ecdsa(): return PrivateKey.generate_ecdsa().public_key() +@pytest.fixture +def evm_address(): + """Returns an EVM Address.""" + return PrivateKey.generate_ecdsa().public_key().to_evm_address() + + @pytest.fixture def account_id_100(): """AccountId with num=100 for testing.""" return AccountId(shard=0, realm=0, num=100) + @pytest.fixture def account_id_101(): """AccountId with num=101 for testing.""" return AccountId(shard=0, realm=0, num=101) + @pytest.fixture def client(mock_client): - mock_client.network.ledger_id = bytes.fromhex("00") # Mainnet ledger id + mock_client.network.ledger_id = bytes.fromhex("00") # Mainnet ledger id return mock_client + def test_default_initialization(): """Test AccountId initialization with default values.""" account_id = AccountId() @@ -53,6 +64,7 @@ def test_default_initialization(): assert account_id.num == 0 assert account_id.alias_key is None assert account_id.checksum is None + assert account_id.evm_address is None def test_custom_initialization(account_id_100): @@ -62,6 +74,7 @@ def test_custom_initialization(account_id_100): assert account_id_100.num == 100 assert account_id_100.alias_key is None assert account_id_100.checksum is None + assert account_id_100.evm_address is None def test_initialization_with_alias_key(alias_key): @@ -73,6 +86,19 @@ def test_initialization_with_alias_key(alias_key): assert account_id.num == 0 assert account_id.alias_key == alias_key assert account_id.checksum is None + assert account_id.evm_address is None + + +def test_initialization_with_evm_address(evm_address): + """Test AccountId initialization with evm_address.""" + account_id = AccountId(shard=0, realm=0, num=0, 
evm_address=evm_address) + + assert account_id.shard == 0 + assert account_id.realm == 0 + assert account_id.num == 0 + assert account_id.alias_key is None + assert account_id.checksum is None + assert account_id.evm_address == evm_address def test_str_representation(account_id_100): @@ -90,12 +116,17 @@ def test_str_representation_with_checksum(client, account_id_100): assert account_id_100.to_string_with_checksum(client) == "0.0.100-hhghj" -def test_str_representation_with_checksum_if_alias_key_present(client, account_id_100, alias_key): +def test_str_representation_with_checksum_if_alias_key_present( + client, account_id_100, alias_key +): """AccountId with aliasKey should raise ValueError on to_string_with_checksum""" account_id = account_id_100 account_id.alias_key = alias_key - with pytest.raises(ValueError, match="Cannot calculate checksum with an account ID that has a aliasKey"): + with pytest.raises( + ValueError, + match="Cannot calculate checksum with an account ID that has a aliasKey or evmAddress", + ): account_id.to_string_with_checksum(client) @@ -121,15 +152,65 @@ def test_repr_representation_with_alias_key(alias_key): assert repr(account_id) == expected -def test_from_string_valid(): +def test_repr_representation_with_evm_address(evm_address): + """Test repr representation of AccountId with evm_address.""" + account_id = AccountId(shard=0, realm=0, num=0, evm_address=evm_address) + + expected = f"AccountId(shard=0, realm=0, evm_address={evm_address.to_string()})" + assert repr(account_id) == expected + + +@pytest.mark.parametrize( + "account_str, expected", + [ + ("0.0.100", (0, 0, 100, None, None, None)), + ("0.0.100-abcde", (0, 0, 100, "abcde", None, None)), + ( + "302a300506032b6570032100114e6abc371b82da", + ( + 0, + 0, + 0, + None, + None, + EvmAddress.from_string("302a300506032b6570032100114e6abc371b82da"), + ), + ), + ( + "0x302a300506032b6570032100114e6abc371b82da", + ( + 0, + 0, + 0, + None, + None, + 
EvmAddress.from_string("302a300506032b6570032100114e6abc371b82da"), + ), + ), + ( + "0.0.302a300506032b6570032100114e6abc371b82da", + ( + 0, + 0, + 0, + None, + None, + EvmAddress.from_string("302a300506032b6570032100114e6abc371b82da"), + ), + ), + ], +) +def test_from_string_valid(account_str, expected): """Test creating AccountId from valid string format.""" - account_id = AccountId.from_string("0.0.100") + shard, realm, num, checksum, alias_key, evm_address = expected + account_id = AccountId.from_string(account_str) - assert account_id.shard == 0 - assert account_id.realm == 0 - assert account_id.num == 100 - assert account_id.alias_key is None - assert account_id.checksum is None + assert account_id.shard == shard + assert account_id.realm == realm + assert account_id.num == num + assert account_id.checksum == checksum + assert account_id.alias_key == alias_key + assert account_id.evm_address == evm_address def test_from_string_zeros(): @@ -151,54 +232,91 @@ def test_from_string_with_checksum(): assert account_id.realm == 0 assert account_id.num == 100 assert account_id.alias_key is None - assert account_id.checksum == 'abcde' + assert account_id.checksum == "abcde" -def test_from_string_with_alias_key(alias_key): - account_id_str = f"0.0.{alias_key.to_string()}" +@pytest.mark.parametrize( + "alias_fixture", ["alias_key", "alias_key_ecdsa", "evm_address"] +) +def test_from_string_with_alias(request, alias_fixture): + """Test create AccountId from string with different alias.""" + alias = request.getfixturevalue(alias_fixture) + + account_id_str = f"0.0.{alias.to_string()}" account_id = AccountId.from_string(account_id_str) assert account_id.shard == 0 assert account_id.realm == 0 assert account_id.num == 0 - assert account_id.alias_key.__eq__(alias_key) assert account_id.checksum is None -def test_from_string_with_alias_key_ecdsa(alias_key_ecdsa): - account_id_str = f"0.0.{alias_key_ecdsa.to_string()}" - account_id = AccountId.from_string(account_id_str) + if 
isinstance(alias, EvmAddress): + assert account_id.evm_address == alias + assert account_id.alias_key is None + else: + assert account_id.alias_key.__eq__(alias) + assert account_id.evm_address is None - assert account_id.shard == 0 - assert account_id.realm == 0 - assert account_id.num == 0 - assert account_id.alias_key.__eq__(alias_key_ecdsa) - assert account_id.checksum is None + +@pytest.mark.parametrize( + "input_str,expected", + [ + ("0x1234567890abcdef1234567890abcdef12345678", True), # valid 0x-prefixed + ("1234567890abcdef1234567890abcdef12345678", True), # valid raw + ("0x123", False), # too short + ("1234567890abcdef1234567890abcdef1234567890", False), # too long + ("0xZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ", False), # invalid hex + ], +) +def test_is_evm_address(input_str, expected): + """Test _is_evm_address static method for all branches.""" + assert AccountId._is_evm_address(input_str) == expected @pytest.mark.parametrize( - 'invalid_id', + "invalid_id", [ - '1.2', # Too few parts - '1.2.3.4', # Too many parts - 'a.b.c', # Non-numeric parts - '', # Empty string - '1.a.3', # Partial numeric - 123, - None, - '0.0.-1', - 'abc.def.ghi', - '0.0.1-ad', - '0.0.1-addefgh', - '0.0.1 - abcde', - ' 0.0.100 ', - '0.0.302a300506032b6570032100114e6abc371b82dab5c15ea149f02d34a012087b163516dd70f44acafabf777g', - '0.0.302a300506032b6570032100114e6abc371b82dab5c15ea149f02d34a012087b163516dd70f44acafabf777' - ] + "1.2", # Too few parts + "1.2.3.4", # Too many parts + "a.b.c", # Non-numeric parts + "", # Empty string + "1.a.3", # Partial numeric + "0.0.-1", + "abc.def.ghi", + "0.0.1-ad", + "0.0.1-addefgh", + "0.0.1 - abcde", + " 0.0.100 ", + "0.0.302a300506032b6570032100114e6abc371b82dab5c15ea149f02d34a012087b163516dd70f44acafabf777g", + "0.0.302a300506032b6570032100114e6abc371b82dab5c15ea149f02d34a012087b163516dd70f44acafabf777", + "0.0.302a300506032b6570032100114e6abc371b82d", + "0.0.ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ", + 
"302a300506032b6570032100114e6abc371b82d", + "0x302a300506032b6570032100114e6abc371b82d", + "0xZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ", # invalid hex + "ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ", + ], ) def test_from_string_for_invalid_format(invalid_id): """Should raise error when creating AccountId from invalid string input.""" with pytest.raises( - ValueError, match=f"Invalid account ID string '{invalid_id}'. Expected format 'shard.realm.num'." + ValueError, + match=f"Invalid account ID string '{invalid_id}'." + "Supported formats: " + "'shard.realm.num', " + "'shard.realm.num-checksum', " + "'shard.realm.', " + "or a 20-byte EVM address.", + ): + AccountId.from_string(invalid_id) + + +@pytest.mark.parametrize("invalid_id", [123, None, True, object, {}]) +def test_from_string_for_invalid_types(invalid_id): + """Should raise error when creating AccountId from invalid types.""" + with pytest.raises( + TypeError, + match=f"account_id_str must be a string, got {type(invalid_id).__name__}.", ): AccountId.from_string(invalid_id) @@ -249,6 +367,18 @@ def test_to_proto_with_ecdsa_alias_key(alias_key_ecdsa): assert proto.alias == alias_key_ecdsa._to_proto().SerializeToString() +def test_to_proto_with_evm_address(evm_address): + """Test converting AccountId with evm_address to protobuf format.""" + account_id = AccountId(shard=0, realm=0, num=100, evm_address=evm_address) + proto = account_id._to_proto() + + assert isinstance(proto, basic_types_pb2.AccountID) + assert proto.shardNum == 0 + assert proto.realmNum == 0 + assert proto.accountNum == 0 + assert proto.alias == evm_address.address_bytes + + def test_from_proto(): """Test creating AccountId from protobuf format.""" proto = basic_types_pb2.AccountID(shardNum=0, realmNum=0, accountNum=100) @@ -259,6 +389,7 @@ def test_from_proto(): assert account_id.realm == 0 assert account_id.num == 100 assert account_id.alias_key is None + assert account_id.evm_address is None def test_from_proto_zero_values(): @@ -271,6 +402,7 
@@ def test_from_proto_zero_values(): assert account_id.realm == 0 assert account_id.num == 0 assert account_id.alias_key is None + assert account_id.evm_address is None def test_from_proto_with_alias(alias_key): @@ -286,6 +418,7 @@ def test_from_proto_with_alias(alias_key): assert account_id.shard == 0 assert account_id.realm == 0 assert account_id.num == 0 + assert account_id.evm_address is None assert account_id.alias_key is not None # Compare the raw bytes assert account_id.alias_key.to_bytes_raw() == alias_key.to_bytes_raw() @@ -304,11 +437,30 @@ def test_from_proto_with_ecdsa_alias(alias_key_ecdsa): assert account_id.shard == 0 assert account_id.realm == 0 assert account_id.num == 0 + assert account_id.evm_address is None assert account_id.alias_key is not None # Compare the raw bytes assert account_id.alias_key.to_bytes_raw() == alias_key_ecdsa.to_bytes_raw() +def test_from_proto_with_evm_address_as_alias(evm_address): + """Test creating AccountId from protobuf format with evm_address.""" + proto = basic_types_pb2.AccountID( + shardNum=0, + realmNum=0, + accountNum=3, + alias=evm_address.address_bytes, + ) + + account_id = AccountId._from_proto(proto) + assert account_id.shard == 0 + assert account_id.realm == 0 + assert account_id.num == 0 + assert account_id.alias_key is None + assert account_id.evm_address is not None + assert account_id.evm_address == evm_address + + def test_roundtrip_proto_conversion(account_id_100): """Test that converting to proto and back preserves values.""" proto = account_id_100._to_proto() @@ -359,6 +511,7 @@ def test_roundtrip_string_conversion(account_id_100): assert account_id_100.realm == reconstructed.realm assert account_id_100.num == reconstructed.num assert account_id_100.alias_key == reconstructed.alias_key + assert account_id_100.evm_address == reconstructed.evm_address def test_equality(account_id_100, account_id_101): @@ -386,6 +539,23 @@ def test_equality_with_alias_key(alias_key, alias_key2): assert account_id1 != 
account_id4 +def test_equality_with_evm_address(evm_address): + """Test AccountId equality comparison with alias keys.""" + account_id1 = AccountId(shard=0, realm=0, num=0, evm_address=evm_address) + account_id2 = AccountId(shard=0, realm=0, num=0, evm_address=evm_address) + account_id3 = AccountId( + shard=0, + realm=0, + num=0, + evm_address=EvmAddress.from_string("302a300506032b6570032100114e6abc371b82da"), + ) + account_id4 = AccountId(shard=0, realm=0, num=0, evm_address=None) + + assert account_id1 == account_id2 + assert account_id1 != account_id3 + assert account_id1 != account_id4 + + def test_equality_different_types(account_id_100): """Test AccountId equality with different types.""" assert account_id_100 != "1.2.3" @@ -404,19 +574,6 @@ def test_hash(account_id_100, account_id_101): assert hash(account_id_100) != hash(account_id_101) -def test_hash_with_alias_key(alias_key, alias_key2): - """Test AccountId hash with alias keys.""" - account_id1 = AccountId(shard=0, realm=0, num=0, alias_key=alias_key) - account_id2 = AccountId(shard=0, realm=0, num=0, alias_key=alias_key) - account_id3 = AccountId(shard=0, realm=0, num=0, alias_key=alias_key2) - - # Same alias key should have same hash - assert hash(account_id1) == hash(account_id2) - - # Different alias keys should have different hashes - assert hash(account_id1) != hash(account_id3) - - def test_alias_key_affects_proto_serialization(account_id_100, alias_key): """Test that alias key affects protobuf serialization correctly.""" # Without alias key @@ -484,6 +641,21 @@ def test_alias_key_affects_string_representation(alias_key, alias_key2, account_ # Account without alias should use num assert str3 == "0.0.100" + +def test_evm_address_affects_string_representation(evm_address): + """Test that evm_address changes string representation behavior.""" + account_id1 = AccountId(shard=0, realm=0, num=0, evm_address=evm_address) + account_id2 = AccountId(shard=0, realm=0, num=100) + + str1 = str(account_id1) + 
str2 = str(account_id2) + + assert str1 != str2 + + assert evm_address.to_string() in str1 + assert str2 == "0.0.100" + + def test_validate_checksum_for_id(client): """Test validateChecksum for accountId""" account_id = AccountId.from_string("0.0.100-hhghj") @@ -495,13 +667,308 @@ def test_validate_checksum_with_alias_key_set(client, alias_key): account_id = AccountId.from_string("0.0.100-hhghj") account_id.alias_key = alias_key - with pytest.raises(ValueError, match="Cannot calculate checksum with an account ID that has a aliasKey"): + with pytest.raises( + ValueError, + match="Cannot calculate checksum with an account ID that has a aliasKey or evmAddress", + ): + account_id.validate_checksum(client) + + +def test_validate_checksum_with_evm_address_key_set(client, evm_address): + """Test validateChecksum should raise ValueError if evm_address is set""" + account_id = AccountId.from_string("0.0.100-hhghj") + account_id.evm_address = evm_address + + with pytest.raises( + ValueError, + match="Cannot calculate checksum with an account ID that has a aliasKey or evmAddress", + ): account_id.validate_checksum(client) def test_validate_checksum_for_invalid_checksum(client): """Test Invalid Checksum for Id should raise ValueError""" account_id = AccountId.from_string("0.0.100-abcde") - + with pytest.raises(ValueError, match="Checksum mismatch for 0.0.100"): account_id.validate_checksum(client) + + +def test_populate_account_num(evm_address): + """Test that populate_account_num correctly queries the mirror node.""" + mock_client = MagicMock() + mock_client.network.get_mirror_rest_url.return_value = "http://mirror_node_rest_url" + + account_id = AccountId.from_evm_address(evm_address, 0, 0) + + response = {"account": "0.0.100"} + + with patch( + "hiero_sdk_python.account.account_id.perform_query_to_mirror_node" + ) as mock_query: + mock_query.return_value = response + new_account_id = account_id.populate_account_num(mock_client) + + assert account_id.num == 0 + assert 
new_account_id.num == 100 + + +def test_populate_account_num_missing_account(evm_address): + """ + Test that populate_account_num raises a ValueError when the mirror node + query does not return an account number. + """ + account_id = AccountId.from_evm_address(evm_address, 0, 0) + mock_client = MagicMock() + mock_client.network.get_mirror_rest_url.return_value = "http://mirror_node_rest_url" + + with patch( + "hiero_sdk_python.account.account_id.perform_query_to_mirror_node" + ) as mock_query: + mock_query.return_value = {} + with pytest.raises(ValueError, match="Mirror node response missing 'account'"): + account_id.populate_account_num(mock_client) + + +def test_populate_account_num_invalid_account_format(evm_address): + """Test populate_account_num raises ValueError for invalid account format.""" + account_id = AccountId.from_evm_address(evm_address, 0, 0) + mock_client = MagicMock() + mock_client.network.get_mirror_rest_url.return_value = "http://mirror_node_rest_url" + + # account value cannot be split into a valid int + response = {"account": "invalid.account.format"} + + with patch( + "hiero_sdk_python.account.account_id.perform_query_to_mirror_node" + ) as mock_query: + mock_query.return_value = response + with pytest.raises( + ValueError, match="Invalid account format received: invalid.account.format" + ): + account_id.populate_account_num(mock_client) + + +def test_populate_account_num_missing_evm_address(): + """Test that populate_account_num raises a ValueError when evm_address is none.""" + account_id = AccountId.from_string("0.0.100") + mock_client = MagicMock() + + with pytest.raises( + ValueError, match="Account evm_address is required before populating num" + ): + account_id.populate_account_num(mock_client) + + +def test_populate_account_num_mirror_node_failure(): + """Test populate_account_num should wrap mirror node RuntimeError with context""" + evm_address = EvmAddress.from_string("0x" + "11" * 20) + account_id = 
AccountId.from_evm_address(evm_address, 0, 0) + + mock_client = MagicMock() + mock_client.network.get_mirror_rest_url.return_value = "http://mirror-node" + + with patch( + "hiero_sdk_python.account.account_id.perform_query_to_mirror_node", + side_effect=RuntimeError("mirror node query error"), + ): + with pytest.raises( + RuntimeError, + match="Failed to populate account number from mirror node for evm_address", + ): + account_id.populate_account_num(mock_client) + + +def test_populate_account_evm_address(evm_address): + """Test that populate_evm_address correctly queries the mirror node.""" + mock_client = MagicMock() + mock_client.network.get_mirror_rest_url.return_value = "http://mirror_node_rest_url" + + account_id = AccountId.from_string("0.0.100") + + response = {"evm_address": evm_address.to_string()} + + with patch( + "hiero_sdk_python.account.account_id.perform_query_to_mirror_node" + ) as mock_query: + mock_query.return_value = response + new_account_id = account_id.populate_evm_address(mock_client) + + assert account_id.evm_address == None + assert new_account_id.evm_address == evm_address + + +def test_populate_evm_address_response_missing_evm_address(): + """ + Test that populate_evm_address raises a ValueError when the mirror node + query does not return an account evm_address. 
+ """ + account_id = AccountId.from_string("0.0.100") + mock_client = MagicMock() + mock_client.network.get_mirror_rest_url.return_value = "http://mirror_node_rest_url" + + with patch( + "hiero_sdk_python.account.account_id.perform_query_to_mirror_node" + ) as mock_query: + mock_query.return_value = {} + with pytest.raises( + ValueError, match="Mirror node response missing 'evm_address'" + ): + account_id.populate_evm_address(mock_client) + + +def test_populate_evm_address_missing_num(evm_address): + """Test that populate_account_num raises a ValueError when num is none.""" + account_id = AccountId.from_evm_address(evm_address, 0, 0) # num == 0 + mock_client = MagicMock() + + with pytest.raises( + ValueError, match="Account number is required before populating evm_address" + ): + account_id.populate_evm_address(mock_client) + + +def test_populate_evm_address_mirror_node_failure(): + """Test populate_evm_address should wrap mirror node RuntimeError with context""" + account_id = AccountId(shard=0, realm=0, num=123) + + mock_client = MagicMock() + mock_client.network.get_mirror_rest_url.return_value = "http://mirror-node" + + with patch( + "hiero_sdk_python.account.account_id.perform_query_to_mirror_node", + side_effect=RuntimeError("mirror node query error"), + ): + with pytest.raises( + RuntimeError, + match="Failed to populate evm_address from mirror node for account 123", + ): + account_id.populate_evm_address(mock_client) + + +def test_populate_evm_address_requires_account_num(): + """Test populate_evm_address should raise ValueError when num is None""" + account_id = AccountId(shard=0, realm=0, num=None) + + mock_client = MagicMock() + + with pytest.raises( + ValueError, match="Account number is required before populating evm_address" + ): + account_id.populate_evm_address(mock_client) + + +def test_to_bytes_and_from_bytes_roundtrip(): + """Ensure basic numeric AccountId converts to bytes and back.""" + account_id = AccountId(0, 0, 100) + account_id_bytes = 
account_id.to_bytes() + + assert account_id_bytes is not None + + # Verify + new_account_id = AccountId.from_bytes(account_id_bytes) + assert new_account_id is not None + + assert new_account_id.shard == account_id.shard + assert new_account_id.realm == account_id.realm + assert new_account_id.num == account_id.num + assert new_account_id.alias_key == account_id.alias_key + assert new_account_id.evm_address == account_id.evm_address + + +def test_get_evm_address_from_account_num(): + """Test to_evm_address return the evm_address using the account num""" + account_id = AccountId.from_string("0.0.100") + assert account_id.to_evm_address() is not None + + +def test_to_bytes_and_from_bytes_with_alias_key(alias_key): + """Ensure alias key survives byte round-trip.""" + account_id = AccountId(0, 0, 0, alias_key=alias_key) + account_id_bytes = account_id.to_bytes() + + assert account_id_bytes is not None + + # Verify + new_account_id = AccountId.from_bytes(account_id_bytes) + assert new_account_id is not None + + assert new_account_id.shard == account_id.shard + assert new_account_id.realm == account_id.realm + # account.num is set to 0 as alias is set + assert new_account_id.alias_key.__eq__(account_id.alias_key) + assert new_account_id.evm_address == account_id.evm_address + + +def test_to_bytes_and_from_bytes_with_evm_address(evm_address): + """Ensure EVM address survives byte round-trip.""" + account_id = AccountId(0, 0, 0, evm_address=evm_address) + account_id_bytes = account_id.to_bytes() + + assert account_id_bytes is not None + + # Verify + new_account_id = AccountId.from_bytes(account_id_bytes) + assert new_account_id is not None + + assert new_account_id.shard == account_id.shard + assert new_account_id.realm == account_id.realm + # account.num is set to 0 as alias is set + assert new_account_id.alias_key == account_id.alias_key + assert new_account_id.evm_address == account_id.evm_address + + +def test_to_evm_address_returns_existing_evm_address(evm_address): + 
"""Test to_evm_address returns stored evm_address if present.""" + account_id = AccountId(shard=0, realm=0, num=0, evm_address=evm_address) + + result = account_id.to_evm_address() + + assert result == evm_address.to_string() + + +def test_from_evm_address_with_hex_string(): + """Test AccountId from a valid 0x-prefixed EVM address string should succeed.""" + evm_str = "1234567890abcdef1234567890abcdef12345678" + evm_str_with_prefix = f"0x{evm_str}" + + account_id = AccountId.from_evm_address(evm_str, shard=0, realm=0) + + assert account_id.shard == 0 + assert account_id.realm == 0 + assert account_id.num == 0 + assert account_id.alias_key is None + assert account_id.evm_address is not None + assert account_id.evm_address.to_string() == evm_str.lower() + + # with prefix '0x' + account_id = AccountId.from_evm_address(evm_str_with_prefix, shard=0, realm=0) + + assert account_id.shard == 0 + assert account_id.realm == 0 + assert account_id.num == 0 + assert account_id.alias_key is None + assert account_id.evm_address is not None + assert account_id.evm_address.to_string() == evm_str.lower() + + +def test_from_evm_address_none(): + """Passing None as evm_address should raise ValueError.""" + with pytest.raises(ValueError, match="evm_address must not be None"): + AccountId.from_evm_address(None, shard=0, realm=0) + + +def test_from_evm_address_invalid_type(): + """Test passing an invalid type as evm_address should raise ValueError.""" + evm_address = 12345 + with pytest.raises( + TypeError, + match=f"evm_address must be a str or EvmAddress, got {type(evm_address).__name__}", + ): + AccountId.from_evm_address(evm_address, shard=0, realm=0) + + +def test_from_evm_address_invalid_string(): + """Test passing an invalid EVM address string should raise ValueError.""" + with pytest.raises(ValueError, match="Invalid EVM address string"): + AccountId.from_evm_address("0xINVALID", shard=0, realm=0) diff --git a/tests/unit/account_info_query_test.py 
b/tests/unit/account_info_query_test.py index 2e5a8d586..c8fa8ed91 100644 --- a/tests/unit/account_info_query_test.py +++ b/tests/unit/account_info_query_test.py @@ -7,8 +7,8 @@ from hiero_sdk_python.response_code import ResponseCode from hiero_sdk_python.hapi.services import ( response_pb2, - response_header_pb2, - crypto_get_info_pb2 + response_header_pb2, + crypto_get_info_pb2, ) from hiero_sdk_python.Duration import Duration from hiero_sdk_python.hapi.services.timestamp_pb2 import Timestamp as TimestampProto @@ -19,43 +19,49 @@ pytestmark = pytest.mark.unit + def test_constructor(): """Test initialization of AccountInfoQuery.""" account_id = AccountId(0, 0, 2) - + query = AccountInfoQuery() assert query.account_id is None - + query = AccountInfoQuery(account_id) assert query.account_id == account_id + def test_execute_fails_with_missing_account_id(mock_client): """Test request creation with missing Account ID.""" query = AccountInfoQuery() - - with pytest.raises(ValueError, match="Account ID must be set before making the request."): + + with pytest.raises( + ValueError, match=r"Account ID must be set before making the request\." 
+ ): query.execute(mock_client) + def test_get_method(): """Test retrieving the gRPC method for the query.""" query = AccountInfoQuery() - + mock_channel = Mock() mock_crypto_stub = Mock() mock_channel.crypto = mock_crypto_stub - + method = query._get_method(mock_channel) - + assert method.transaction is None assert method.query == mock_crypto_stub.getAccountInfo + def test_account_info_query_execute(mock_account_ids, private_key): """Test basic functionality of AccountInfoQuery with mock server.""" account_id = mock_account_ids[0] expiration_time = TimestampProto(seconds=1718745600) # 90 days in seconds auto_renew_period = DurationProto(seconds=7890000) - + # Create account info response with test data account_info_response = crypto_get_info_pb2.CryptoGetInfoResponse.AccountInfo( accountID=account_id._to_proto(), @@ -68,21 +74,21 @@ def test_account_info_query_execute(mock_account_ids, private_key): expirationTime=expiration_time, autoRenewPeriod=auto_renew_period, memo="test memo", - ownedNfts=0 + ownedNfts=0, ) response_sequences = get_account_info_responses(account_info_response) - + with mock_hedera_servers(response_sequences) as client: query = AccountInfoQuery(account_id) - + # Get cost and verify it matches expected value cost = query.get_cost(client) assert cost.to_tinybars() == 2 - + # Execute query and get result result = query.execute(client) - + assert result.account_id == account_id assert result.contract_account_id == "" assert not result.is_deleted @@ -96,34 +102,37 @@ def test_account_info_query_execute(mock_account_ids, private_key): assert result.account_memo == "test memo" assert result.owned_nfts == 0 + def get_account_info_responses(account_info_response): - return [[ - response_pb2.Response( - cryptoGetInfo=crypto_get_info_pb2.CryptoGetInfoResponse( - header=response_header_pb2.ResponseHeader( - nodeTransactionPrecheckCode=ResponseCode.OK, - responseType=ResponseType.COST_ANSWER, - cost=2 + return [ + [ + response_pb2.Response( + 
cryptoGetInfo=crypto_get_info_pb2.CryptoGetInfoResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK, + responseType=ResponseType.COST_ANSWER, + cost=2, + ) + ) + ), + response_pb2.Response( + cryptoGetInfo=crypto_get_info_pb2.CryptoGetInfoResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK, + responseType=ResponseType.COST_ANSWER, + cost=2, + ) ) - ) - ), - response_pb2.Response( - cryptoGetInfo=crypto_get_info_pb2.CryptoGetInfoResponse( - header=response_header_pb2.ResponseHeader( - nodeTransactionPrecheckCode=ResponseCode.OK, - responseType=ResponseType.COST_ANSWER, - cost=2 + ), + response_pb2.Response( + cryptoGetInfo=crypto_get_info_pb2.CryptoGetInfoResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK, + responseType=ResponseType.ANSWER_ONLY, + cost=2, + ), + accountInfo=account_info_response, ) - ) - ), - response_pb2.Response( - cryptoGetInfo=crypto_get_info_pb2.CryptoGetInfoResponse( - header=response_header_pb2.ResponseHeader( - nodeTransactionPrecheckCode=ResponseCode.OK, - responseType=ResponseType.ANSWER_ONLY, - cost=2 - ), - accountInfo=account_info_response - ) - ) - ]] \ No newline at end of file + ), + ] + ] diff --git a/tests/unit/account_info_test.py b/tests/unit/account_info_test.py index e645ffba2..ad0bbf29e 100644 --- a/tests/unit/account_info_test.py +++ b/tests/unit/account_info_test.py @@ -12,6 +12,7 @@ pytestmark = pytest.mark.unit + @pytest.fixture def account_info(): return AccountInfo( @@ -26,9 +27,10 @@ def account_info(): auto_renew_period=Duration(7776000), # 90 days token_relationships=[], account_memo="Test account memo", - owned_nfts=5 + owned_nfts=5, ) + @pytest.fixture def proto_account_info(): public_key = PrivateKey.generate_ed25519().public_key() @@ -44,10 +46,11 @@ def proto_account_info(): autoRenewPeriod=Duration(7776000)._to_proto(), tokenRelationships=[], memo="Test account 
memo", - ownedNfts=5 + ownedNfts=5, ) return proto + def test_account_info_initialization(account_info): """Test the initialization of the AccountInfo class""" assert account_info.account_id == AccountId(0, 0, 100) @@ -63,6 +66,7 @@ def test_account_info_initialization(account_info): assert account_info.account_memo == "Test account memo" assert account_info.owned_nfts == 5 + def test_account_info_default_initialization(): """Test the default initialization of the AccountInfo class""" account_info = AccountInfo() @@ -79,10 +83,11 @@ def test_account_info_default_initialization(): assert account_info.account_memo is None assert account_info.owned_nfts is None + def test_from_proto(proto_account_info): """Test the from_proto method of the AccountInfo class""" account_info = AccountInfo._from_proto(proto_account_info) - + assert account_info.account_id == AccountId(0, 0, 100) assert account_info.contract_account_id == "0.0.100" assert account_info.is_deleted is False @@ -96,6 +101,7 @@ def test_from_proto(proto_account_info): assert account_info.account_memo == "Test account memo" assert account_info.owned_nfts == 5 + def test_from_proto_with_token_relationships(): """Test the from_proto method of the AccountInfo class with token relationships""" # Create a minimal proto without a key to avoid the key parsing issue @@ -104,23 +110,25 @@ def test_from_proto_with_token_relationships(): accountID=AccountId(0, 0, 100)._to_proto(), key=public_key._to_proto(), balance=5000000, - tokenRelationships=[] + tokenRelationships=[], ) - + account_info = AccountInfo._from_proto(proto) assert account_info.account_id == AccountId(0, 0, 100) assert account_info.balance.to_tinybars() == 5000000 assert account_info.token_relationships == [] + def test_from_proto_none_raises_error(): """Test the from_proto method of the AccountInfo class with a None proto""" with pytest.raises(ValueError, match="Account info proto is None"): AccountInfo._from_proto(None) + def test_to_proto(account_info): 
"""Test the to_proto method of the AccountInfo class""" proto = account_info._to_proto() - + assert proto.accountID == AccountId(0, 0, 100)._to_proto() assert proto.contractAccountID == "0.0.100" assert proto.deleted is False @@ -134,42 +142,58 @@ def test_to_proto(account_info): assert proto.memo == "Test account memo" assert proto.ownedNfts == 5 + def test_to_proto_with_none_values(): """Test the to_proto method of the AccountInfo class with none values""" account_info = AccountInfo() proto = account_info._to_proto() - + # Protobuf has default values, so we check the proto structure exists - assert hasattr(proto, 'accountID') + assert hasattr(proto, "accountID") assert proto.contractAccountID == "" # Empty string is default for protobuf assert proto.deleted is False # False is default for protobuf bool assert proto.proxyReceived == 0 # 0 is default for protobuf int when None is passed - assert hasattr(proto, 'key') + assert hasattr(proto, "key") assert proto.balance == 0 # 0 is default for protobuf int when None is passed assert proto.receiverSigRequired is False # False is default for protobuf bool - assert hasattr(proto, 'expirationTime') - assert hasattr(proto, 'autoRenewPeriod') + assert hasattr(proto, "expirationTime") + assert hasattr(proto, "autoRenewPeriod") assert proto.tokenRelationships == [] assert proto.memo == "" # Empty string is default for protobuf assert proto.ownedNfts == 0 # 0 is default for protobuf int + def test_proto_conversion(account_info): """Test converting AccountInfo to proto and back preserves data""" proto = account_info._to_proto() converted_account_info = AccountInfo._from_proto(proto) - + assert converted_account_info.account_id == account_info.account_id - assert converted_account_info.contract_account_id == account_info.contract_account_id + assert ( + converted_account_info.contract_account_id == account_info.contract_account_id + ) assert converted_account_info.is_deleted == account_info.is_deleted - assert 
converted_account_info.proxy_received.to_tinybars() == account_info.proxy_received.to_tinybars() - assert converted_account_info.balance.to_tinybars() == account_info.balance.to_tinybars() - assert converted_account_info.receiver_signature_required == account_info.receiver_signature_required + assert ( + converted_account_info.proxy_received.to_tinybars() + == account_info.proxy_received.to_tinybars() + ) + assert ( + converted_account_info.balance.to_tinybars() + == account_info.balance.to_tinybars() + ) + assert ( + converted_account_info.receiver_signature_required + == account_info.receiver_signature_required + ) assert converted_account_info.expiration_time == account_info.expiration_time assert converted_account_info.auto_renew_period == account_info.auto_renew_period - assert converted_account_info.token_relationships == account_info.token_relationships + assert ( + converted_account_info.token_relationships == account_info.token_relationships + ) assert converted_account_info.account_memo == account_info.account_memo assert converted_account_info.owned_nfts == account_info.owned_nfts + def test_str_and_repr(account_info): """Test the __str__ and __repr__ methods""" info_str = str(account_info) @@ -185,4 +209,4 @@ def test_str_and_repr(account_info): assert info_repr.startswith("AccountInfo(") assert "account_id=AccountId(shard=0, realm=0, num=100" in info_repr assert "contract_account_id='0.0.100'" in info_repr - assert "account_memo='Test account memo'" in info_repr \ No newline at end of file + assert "account_memo='Test account memo'" in info_repr diff --git a/tests/unit/account_records_query_test.py b/tests/unit/account_records_query_test.py index d9d527b49..c5de3c5bc 100644 --- a/tests/unit/account_records_query_test.py +++ b/tests/unit/account_records_query_test.py @@ -41,7 +41,9 @@ def test_execute_fails_with_missing_account_id(mock_client): """Test request creation with missing Account ID.""" query = AccountRecordsQuery() - with 
pytest.raises(ValueError, match="Account ID must be set before making the request."): + with pytest.raises( + ValueError, match="Account ID must be set before making the request." + ): query.execute(mock_client) diff --git a/tests/unit/account_update_transaction_test.py b/tests/unit/account_update_transaction_test.py index fac3526f2..62b3c5453 100644 --- a/tests/unit/account_update_transaction_test.py +++ b/tests/unit/account_update_transaction_test.py @@ -76,7 +76,9 @@ def test_constructor_with_account_params(): assert account_tx.expiration_time == expiration_time assert account_tx.max_automatic_token_associations == max_associations assert account_tx.staked_account_id == staked_account_id - assert account_tx.staked_node_id is None # Should be cleared when staked_account_id is set + assert ( + account_tx.staked_node_id is None + ) # Should be cleared when staked_account_id is set assert account_tx.decline_staking_reward == decline_reward @@ -135,7 +137,11 @@ def test_set_methods(): "receiver_signature_required", ), ("set_expiration_time", expiration_time, "expiration_time"), - ("set_max_automatic_token_associations", 100, "max_automatic_token_associations"), + ( + "set_max_automatic_token_associations", + 100, + "max_automatic_token_associations", + ), ("set_decline_staking_reward", True, "decline_staking_reward"), ] @@ -502,6 +508,7 @@ def test_build_transaction_body_with_none_auto_renew_period(mock_account_ids): # When auto_renew_period is None, the field should not be set in the protobuf assert not transaction_body.cryptoUpdateAccount.HasField("autoRenewPeriod") + def test_build_scheduled_body(mock_account_ids): """Test building a schedulable account update transaction body with valid values.""" operator_id, _, node_account_id, _, _ = mock_account_ids @@ -574,7 +581,9 @@ def test_constructor_with_new_fields(): assert account_tx.account_id == account_id assert account_tx.max_automatic_token_associations == max_associations assert account_tx.staked_account_id == 
staked_account_id - assert account_tx.staked_node_id is None # Should be cleared when staked_account_id is set + assert ( + account_tx.staked_node_id is None + ) # Should be cleared when staked_account_id is set assert account_tx.decline_staking_reward is True @@ -737,8 +746,9 @@ def test_build_transaction_body_with_new_fields(mock_account_ids): assert ( transaction_body.cryptoUpdateAccount.accountIDToUpdate == account_id._to_proto() ) - assert transaction_body.cryptoUpdateAccount.max_automatic_token_associations == Int32Value( - value=max_associations + assert ( + transaction_body.cryptoUpdateAccount.max_automatic_token_associations + == Int32Value(value=max_associations) ) assert ( transaction_body.cryptoUpdateAccount.staked_account_id.accountNum @@ -815,4 +825,4 @@ def test_build_transaction_body_with_cleared_staking(mock_account_ids): account_tx.node_account_id = node_account_id txn_body = account_tx.build_transaction_body().cryptoUpdateAccount assert txn_body.staked_node_id == -1 - assert not txn_body.HasField("staked_account_id") \ No newline at end of file + assert not txn_body.HasField("staked_account_id") diff --git a/tests/unit/assessed_custom_fee_test.py b/tests/unit/assessed_custom_fee_test.py new file mode 100644 index 000000000..dcc477f21 --- /dev/null +++ b/tests/unit/assessed_custom_fee_test.py @@ -0,0 +1,199 @@ +import pytest + +from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.hapi.services.custom_fees_pb2 import AssessedCustomFee as AssessedCustomFeeProto +from hiero_sdk_python.tokens.assessed_custom_fee import AssessedCustomFee +from hiero_sdk_python.tokens.token_id import TokenId + + +pytestmark = pytest.mark.unit + + +# If conftest.py has fixtures like sample_account_id or sample_token_id, use them. +# Otherwise, define simple ones here (adjust shard/realm/num as needed for realism). 
+@pytest.fixture +def sample_account_id() -> AccountId: + return AccountId(shard=0, realm=0, num=123456) + + +@pytest.fixture +def sample_token_id() -> TokenId: + return TokenId(shard=0, realm=0, num=789012) + + +@pytest.fixture +def another_account_id() -> AccountId: + return AccountId(shard=0, realm=0, num=999999) + + +def test_constructor_all_fields( + sample_account_id: AccountId, + sample_token_id: TokenId, + another_account_id: AccountId, +): + payers = [sample_account_id, another_account_id] + fee = AssessedCustomFee( + amount=1_500_000_000, + token_id=sample_token_id, + fee_collector_account_id=sample_account_id, + effective_payer_account_ids=payers, + ) + assert fee.amount == 1_500_000_000 + assert fee.token_id == sample_token_id + assert fee.fee_collector_account_id == sample_account_id + assert fee.effective_payer_account_ids == payers + # Protect against breaking changes + assert hasattr(fee, 'amount') + assert hasattr(fee, 'token_id') + assert hasattr(fee, 'fee_collector_account_id') + assert hasattr(fee, 'effective_payer_account_ids') + + +def test_constructor_hbar_case(sample_account_id: AccountId): + fee = AssessedCustomFee( + amount=100_000_000, + token_id=None, + fee_collector_account_id=sample_account_id, + ) + assert fee.amount == 100_000_000 + assert fee.token_id is None + assert fee.fee_collector_account_id == sample_account_id + assert fee.effective_payer_account_ids == [] + + +def test_constructor_empty_payers(sample_account_id: AccountId, sample_token_id: TokenId): + fee = AssessedCustomFee( + amount=420, + token_id=sample_token_id, + fee_collector_account_id=sample_account_id, + effective_payer_account_ids=[], + ) + assert fee.effective_payer_account_ids == [] + assert fee.token_id == sample_token_id + +def test_constructor_missing_fee_collector_raises(): + """Verify that omitting fee_collector_account_id raises ValueError.""" + with pytest.raises(ValueError, match="fee_collector_account_id is required"): + AssessedCustomFee( + amount=100, 
+ token_id=None, + fee_collector_account_id=None, + ) + +def test_from_proto_missing_token_id(sample_account_id: AccountId): + """Verify that absence of token_id in protobuf correctly maps to None.""" + proto = AssessedCustomFeeProto( + amount=750_000, + fee_collector_account_id=sample_account_id._to_proto(), + # intentionally no token_id → proto.HasField("token_id") should be False + ) + + fee = AssessedCustomFee._from_proto(proto) + + assert fee.amount == 750_000 + assert fee.token_id is None, "token_id should be None when not present in proto" + assert fee.fee_collector_account_id == sample_account_id + assert fee.effective_payer_account_ids == [], "effective payers should default to empty list" + +def test_from_proto_with_token_id(sample_account_id: AccountId, sample_token_id: TokenId): + """Verify that token_id is correctly deserialized when present in proto.""" + proto = AssessedCustomFeeProto( + amount=500_000, + token_id=sample_token_id._to_proto(), + fee_collector_account_id=sample_account_id._to_proto(), + ) + proto.effective_payer_account_id.append(sample_account_id._to_proto()) + + fee = AssessedCustomFee._from_proto(proto) + + assert fee.amount == 500_000 + assert fee.token_id is not None + assert fee.token_id == sample_token_id + assert fee.fee_collector_account_id == sample_account_id + assert len(fee.effective_payer_account_ids) == 1 + +def test_from_proto_missing_fee_collector_raises(): + """Verify that missing fee_collector_account_id in proto raises ValueError.""" + proto = AssessedCustomFeeProto(amount=750_000) + with pytest.raises(ValueError, match="fee_collector_account_id is required"): + AssessedCustomFee._from_proto(proto) + +def test_to_proto_basic_fields( + sample_account_id: AccountId, + sample_token_id: TokenId, + another_account_id: AccountId, +): + """Verify that all basic fields are correctly serialized to protobuf.""" + payers = [sample_account_id, another_account_id] + + fee = AssessedCustomFee( + amount=2_000_000, + 
token_id=sample_token_id, + fee_collector_account_id=sample_account_id, + effective_payer_account_ids=payers, + ) + + proto = fee._to_proto() + + # Core presence and value checks + assert proto.amount == 2_000_000 + assert proto.HasField("token_id"), "token_id should be set when present" + assert proto.HasField("fee_collector_account_id") + assert len(proto.effective_payer_account_id) == 2, "should serialize both effective payers" + + # Deeper structural checks (helps catch broken _to_proto implementations) + assert proto.token_id.shardNum == sample_token_id.shard + assert proto.token_id.realmNum == sample_token_id.realm + assert proto.token_id.tokenNum == sample_token_id.num + + # Optional: verify collector (often useful when debugging) + assert proto.fee_collector_account_id.shardNum == sample_account_id.shard + assert proto.fee_collector_account_id.realmNum == sample_account_id.realm + assert proto.fee_collector_account_id.accountNum == sample_account_id.num + + +def test_round_trip_conversion( + sample_account_id: AccountId, + sample_token_id: TokenId, +): + original = AssessedCustomFee( + amount=987_654_321, + token_id=sample_token_id, + fee_collector_account_id=sample_account_id, + effective_payer_account_ids=[sample_account_id], + ) + + proto = original._to_proto() + reconstructed = AssessedCustomFee._from_proto(proto) + + assert reconstructed.amount == original.amount + assert reconstructed.token_id == original.token_id + assert reconstructed.fee_collector_account_id == original.fee_collector_account_id + assert reconstructed.effective_payer_account_ids == original.effective_payer_account_ids + + +def test_str_contains_expected_fields( + sample_account_id: AccountId, + sample_token_id: TokenId, +): + fee = AssessedCustomFee( + amount=5_000_000, + token_id=sample_token_id, + fee_collector_account_id=sample_account_id, + effective_payer_account_ids=[sample_account_id], + ) + + s = str(fee) + assert "AssessedCustomFee" in s + assert "amount=5000000" in s + 
assert str(sample_token_id) in s + assert str(sample_account_id) in s + assert "effective_payer_account_ids" in s + + # HBAR case + hbar_fee = AssessedCustomFee( + amount=123_456, + fee_collector_account_id=sample_account_id, + ) + hbar_str = str(hbar_fee) + assert "token_id=None" in hbar_str diff --git a/tests/unit/batch_transaction_test.py b/tests/unit/batch_transaction_test.py index ff28a5045..39164f3b4 100644 --- a/tests/unit/batch_transaction_test.py +++ b/tests/unit/batch_transaction_test.py @@ -52,8 +52,12 @@ def _make_tx(batch_key=None, freeze=True): def test_constructor_without_params_creates_empty_inner_transactions(): """Test create batch transaction without constructor params.""" batch_tx = BatchTransaction() - assert batch_tx.inner_transactions is not None, "inner_transactions should not be none" - assert len(batch_tx.inner_transactions) == 0, "inner_transactions should be empty by default" + assert ( + batch_tx.inner_transactions is not None + ), "inner_transactions should not be none" + assert ( + len(batch_tx.inner_transactions) == 0 + ), "inner_transactions should be empty by default" def test_constructor_with_params_accepts_valid_inner_transactions(mock_tx): @@ -69,7 +73,6 @@ def test_constructor_with_params_accepts_valid_inner_transactions(mock_tx): assert all(isinstance(t, TransferTransaction) for t in batch_tx.inner_transactions) - def test_constructor_rejects_transaction_without_batch_key(mock_tx): """Test create batch transaction should raise error if an inner transaction has no batch key.""" inner_tx1 = [mock_tx(batch_key=None, freeze=True)] @@ -106,12 +109,11 @@ def test_constructor_rejects_blacklisted_transaction_types(mock_client, mock_tx) batch_key = PrivateKey.generate() # FreezeTransaction is not allowed - inner_tx1 = [ - FreezeTransaction() - .set_batch_key(batch_key) - .freeze_with(mock_client) - ] - with pytest.raises(ValueError, match='Transaction type FreezeTransaction is not allowed in a batch transaction'): + inner_tx1 = 
[FreezeTransaction().set_batch_key(batch_key).freeze_with(mock_client)] + with pytest.raises( + ValueError, + match="Transaction type FreezeTransaction is not allowed in a batch transaction", + ): BatchTransaction(inner_transactions=inner_tx1) # BatchTransaction as an inner transaction is not allowed @@ -120,7 +122,10 @@ def test_constructor_rejects_blacklisted_transaction_types(mock_client, mock_tx) .set_batch_key(batch_key) .freeze_with(mock_client) ] - with pytest.raises(ValueError, match='Transaction type BatchTransaction is not allowed in a batch transaction'): + with pytest.raises( + ValueError, + match="Transaction type BatchTransaction is not allowed in a batch transaction", + ): BatchTransaction(inner_transactions=inner_tx2) @@ -149,16 +154,24 @@ def test_set_inner_transactions_invalid_param(mock_tx, mock_client): batch_tx.set_inner_transactions([mock_tx(batch_key=batch_key, freeze=False)]) # FreezeTransaction not allowed - with pytest.raises(ValueError, match='Transaction type FreezeTransaction is not allowed in a batch transaction'): - batch_tx.set_inner_transactions([ - FreezeTransaction().set_batch_key(batch_key).freeze_with(mock_client) - ]) + with pytest.raises( + ValueError, + match="Transaction type FreezeTransaction is not allowed in a batch transaction", + ): + batch_tx.set_inner_transactions( + [FreezeTransaction().set_batch_key(batch_key).freeze_with(mock_client)] + ) # BatchTransaction not allowed - nested_batch = BatchTransaction(inner_transactions=[mock_tx(batch_key=batch_key, freeze=True)]) + nested_batch = BatchTransaction( + inner_transactions=[mock_tx(batch_key=batch_key, freeze=True)] + ) nested_batch.set_batch_key(batch_key) nested_batch.freeze_with(mock_client) - with pytest.raises(ValueError, match='Transaction type BatchTransaction is not allowed in a batch transaction'): + with pytest.raises( + ValueError, + match="Transaction type BatchTransaction is not allowed in a batch transaction", + ): 
batch_tx.set_inner_transactions([nested_batch]) @@ -188,16 +201,24 @@ def test_add_inner_transaction_method_invalid_param(mock_tx, mock_client): batch_tx.add_inner_transaction(mock_tx(batch_key=batch_key, freeze=False)) # FreezeTransaction - with pytest.raises(ValueError, match='Transaction type FreezeTransaction is not allowed in a batch transaction'): + with pytest.raises( + ValueError, + match="Transaction type FreezeTransaction is not allowed in a batch transaction", + ): batch_tx.add_inner_transaction( FreezeTransaction().set_batch_key(batch_key).freeze_with(mock_client) ) # BatchTransaction - nested_batch = BatchTransaction(inner_transactions=[mock_tx(batch_key=batch_key, freeze=True)]) + nested_batch = BatchTransaction( + inner_transactions=[mock_tx(batch_key=batch_key, freeze=True)] + ) nested_batch.set_batch_key(batch_key) nested_batch.freeze_with(mock_client) - with pytest.raises(ValueError, match='Transaction type BatchTransaction is not allowed in a batch transaction'): + with pytest.raises( + ValueError, + match="Transaction type BatchTransaction is not allowed in a batch transaction", + ): batch_tx.add_inner_transaction(nested_batch) @@ -205,7 +226,9 @@ def test_get_inner_transactions_ids_returns_transaction_ids(mock_tx): """Test get_transaction_ids methods returns transaction_ids.""" batch_key = PrivateKey.generate() batch_tx = BatchTransaction() - assert batch_tx.get_inner_transaction_ids() == [], "No inner transactions should return an empty list" + assert ( + batch_tx.get_inner_transaction_ids() == [] + ), "No inner transactions should return an empty list" transaction = mock_tx(batch_key=batch_key, freeze=True) batch_tx.add_inner_transaction(transaction) @@ -255,12 +278,18 @@ def test_batchify_sets_required_fields(mock_account_ids, mock_client): .batchify(mock_client, batch_key) ) - assert tx._transaction_body_bytes is not None, "batchify should set _transaction_body_bytes" + assert ( + tx._transaction_body_bytes is not None + ), "batchify should 
set _transaction_body_bytes" assert tx.batch_key == batch_key - assert tx.node_account_id == AccountId(0, 0, 0), "node_account_id for batched tx should be 0.0.0" + assert tx.node_account_id == AccountId( + 0, 0, 0 + ), "node_account_id for batched tx should be 0.0.0" -def test_round_trip_to_bytes_and_back_preserves_inner_transactions(mock_account_ids, mock_client): +def test_round_trip_to_bytes_and_back_preserves_inner_transactions( + mock_account_ids, mock_client +): """Test round trip of converting transaction to_bytes and from_bytes.""" sender, receiver, _, _, _ = mock_account_ids batch_key = PrivateKey.generate() @@ -272,7 +301,12 @@ def test_round_trip_to_bytes_and_back_preserves_inner_transactions(mock_account_ .batchify(mock_client, batch_key) ) - batch_tx = BatchTransaction().add_inner_transaction(transfer_tx).freeze_with(mock_client).sign(batch_key) + batch_tx = ( + BatchTransaction() + .add_inner_transaction(transfer_tx) + .freeze_with(mock_client) + .sign(batch_key) + ) batch_tx_bytes = batch_tx.to_bytes() assert batch_tx_bytes and len(batch_tx_bytes) > 0 @@ -290,11 +324,13 @@ def test_round_trip_to_bytes_and_back_preserves_inner_transactions(mock_account_ def test_sign_transaction(mock_client, mock_tx): """Test signing the batch transaction with a private key.""" batch_tx = BatchTransaction() - batch_tx.set_inner_transactions([mock_tx(batch_key=PrivateKey.generate(), freeze=True)]) + batch_tx.set_inner_transactions( + [mock_tx(batch_key=PrivateKey.generate(), freeze=True)] + ) private_key = MagicMock() - private_key.sign.return_value = b'signature' - private_key.public_key().to_bytes_raw.return_value = b'public_key' + private_key.sign.return_value = b"signature" + private_key.public_key().to_bytes_raw.return_value = b"public_key" batch_tx.freeze_with(mock_client) batch_tx.sign(private_key) @@ -302,28 +338,34 @@ def test_sign_transaction(mock_client, mock_tx): node_id = mock_client.network.current_node._account_id body_bytes = 
batch_tx._transaction_body_bytes[node_id] - assert body_bytes in batch_tx._signature_map, "signature map must contain an entry for the tx body bytes" + assert ( + body_bytes in batch_tx._signature_map + ), "signature map must contain an entry for the tx body bytes" sig_pairs = batch_tx._signature_map[body_bytes].sigPair assert len(sig_pairs) == 1 sig_pair = sig_pairs[0] - assert sig_pair.pubKeyPrefix == b'public_key' - assert sig_pair.ed25519 == b'signature' + assert sig_pair.pubKeyPrefix == b"public_key" + assert sig_pair.ed25519 == b"signature" def test_to_proto(mock_client, mock_tx): """Test converting the batch transaction to protobuf format after signing.""" batch_tx = BatchTransaction() - batch_tx.set_inner_transactions([mock_tx(batch_key=PrivateKey.generate(), freeze=True)]) + batch_tx.set_inner_transactions( + [mock_tx(batch_key=PrivateKey.generate(), freeze=True)] + ) private_key = MagicMock() - private_key.sign.return_value = b'signature' - private_key.public_key().to_bytes_raw.return_value = b'public_key' + private_key.sign.return_value = b"signature" + private_key.public_key().to_bytes_raw.return_value = b"public_key" batch_tx.freeze_with(mock_client) batch_tx.sign(private_key) proto = batch_tx._to_proto() - assert getattr(proto, "signedTransactionBytes", None), "proto must include signedTransactionBytes" + assert getattr( + proto, "signedTransactionBytes", None + ), "proto must include signedTransactionBytes" assert len(proto.signedTransactionBytes) > 0 @@ -344,7 +386,7 @@ def test_batch_transaction_execute_successful(mock_account_ids, mock_client): header=response_header_pb2.ResponseHeader( nodeTransactionPrecheckCode=ResponseCode.OK ), - receipt=mock_receipt_proto + receipt=mock_receipt_proto, ) ) @@ -366,17 +408,19 @@ def test_batch_transaction_execute_successful(mock_account_ids, mock_client): ) receipt = transaction.execute(client) - assert receipt.status == ResponseCode.SUCCESS, f"Transaction should have succeeded, got {receipt.status}" + assert 
( + receipt.status == ResponseCode.SUCCESS + ), f"Transaction should have succeeded, got {receipt.status}" -def test_batch_key_accepts_public_key(mock_client, mock_account_ids): +def test_batch_key_accepts_public_key(mock_account_ids): """Test that batch_key can accept PublicKey (not just PrivateKey).""" sender, receiver, _, _, _ = mock_account_ids - + # Generate a key pair private_key = PrivateKey.generate_ed25519() public_key = private_key.public_key() - + # Test using PublicKey as batch_key tx = ( TransferTransaction() @@ -384,7 +428,7 @@ def test_batch_key_accepts_public_key(mock_client, mock_account_ids): .add_hbar_transfer(account_id=receiver, amount=1) .set_batch_key(public_key) # Using PublicKey instead of PrivateKey ) - + # Verify batch_key was set correctly assert tx.batch_key == public_key assert isinstance(tx.batch_key, type(public_key)) @@ -393,11 +437,11 @@ def test_batch_key_accepts_public_key(mock_client, mock_account_ids): def test_batchify_with_public_key(mock_client, mock_account_ids): """Test that batchify method accepts PublicKey.""" sender, receiver, _, _, _ = mock_account_ids - + # Generate a key pair private_key = PrivateKey.generate_ed25519() public_key = private_key.public_key() - + # Test using PublicKey in batchify tx = ( TransferTransaction() @@ -405,23 +449,25 @@ def test_batchify_with_public_key(mock_client, mock_account_ids): .add_hbar_transfer(account_id=receiver, amount=1) .batchify(mock_client, public_key) # Using PublicKey ) - + # Verify batch_key was set and transaction was frozen assert tx.batch_key == public_key assert tx._transaction_body_bytes # Should be frozen -def test_batch_transaction_with_public_key_inner_transactions(mock_client, mock_account_ids): +def test_batch_transaction_with_public_key_inner_transactions( + mock_client, mock_account_ids +): """Test BatchTransaction can accept inner transactions with PublicKey batch_keys.""" sender, receiver, _, _, _ = mock_account_ids - + # Generate key pairs batch_key1 = 
PrivateKey.generate_ed25519() public_key1 = batch_key1.public_key() - + batch_key2 = PrivateKey.generate_ecdsa() public_key2 = batch_key2.public_key() - + # Create inner transactions with PublicKey batch_keys inner_tx1 = ( TransferTransaction() @@ -430,7 +476,7 @@ def test_batch_transaction_with_public_key_inner_transactions(mock_client, mock_ .set_batch_key(public_key1) .freeze_with(mock_client) ) - + inner_tx2 = ( TransferTransaction() .add_hbar_transfer(account_id=sender, amount=-1) @@ -438,10 +484,10 @@ def test_batch_transaction_with_public_key_inner_transactions(mock_client, mock_ .set_batch_key(public_key2) .freeze_with(mock_client) ) - + # BatchTransaction should accept these inner transactions batch_tx = BatchTransaction(inner_transactions=[inner_tx1, inner_tx2]) - + assert len(batch_tx.inner_transactions) == 2 assert batch_tx.inner_transactions[0].batch_key == public_key1 assert batch_tx.inner_transactions[1].batch_key == public_key2 @@ -450,11 +496,11 @@ def test_batch_transaction_with_public_key_inner_transactions(mock_client, mock_ def test_batch_key_mixed_private_and_public_keys(mock_client, mock_account_ids): """Test that BatchTransaction can handle inner transactions with mixed PrivateKey and PublicKey.""" sender, receiver, _, _, _ = mock_account_ids - + # Generate keys private_key = PrivateKey.generate_ed25519() public_key = PrivateKey.generate_ecdsa().public_key() - + # Inner transaction with PrivateKey inner_tx1 = ( TransferTransaction() @@ -463,7 +509,7 @@ def test_batch_key_mixed_private_and_public_keys(mock_client, mock_account_ids): .set_batch_key(private_key) .freeze_with(mock_client) ) - + # Inner transaction with PublicKey inner_tx2 = ( TransferTransaction() @@ -472,10 +518,10 @@ def test_batch_key_mixed_private_and_public_keys(mock_client, mock_account_ids): .set_batch_key(public_key) .freeze_with(mock_client) ) - + # BatchTransaction should accept mixed key types batch_tx = BatchTransaction(inner_transactions=[inner_tx1, inner_tx2]) - + 
assert len(batch_tx.inner_transactions) == 2 assert isinstance(batch_tx.inner_transactions[0].batch_key, PrivateKey) assert isinstance(batch_tx.inner_transactions[1].batch_key, type(public_key)) @@ -485,9 +531,9 @@ def test_set_batch_key_with_private_key(): """Test that batch_key can be set with PrivateKey.""" private_key = PrivateKey.generate_ed25519() transaction = TransferTransaction() - + result = transaction.set_batch_key(private_key) - + assert transaction.batch_key == private_key assert result == transaction # Check method chaining @@ -497,9 +543,9 @@ def test_set_batch_key_with_public_key(): private_key = PrivateKey.generate_ed25519() public_key = private_key.public_key() transaction = TransferTransaction() - + result = transaction.set_batch_key(public_key) - + assert transaction.batch_key == public_key assert result == transaction # Check method chaining @@ -507,12 +553,12 @@ def test_set_batch_key_with_public_key(): def test_batch_key_type_annotation(): """Test that batch_key accepts both PrivateKey and PublicKey types.""" transaction = TransferTransaction() - + # Test with PrivateKey private_key = PrivateKey.generate_ecdsa() transaction.set_batch_key(private_key) assert isinstance(transaction.batch_key, PrivateKey) - + # Test with PublicKey public_key = private_key.public_key() transaction.set_batch_key(public_key) diff --git a/tests/unit/client_test.py b/tests/unit/client_test.py index 2085b0831..f2c0fc41f 100644 --- a/tests/unit/client_test.py +++ b/tests/unit/client_test.py @@ -5,16 +5,17 @@ from decimal import Decimal import os import pytest -from unittest.mock import patch +from unittest.mock import MagicMock, patch from hiero_sdk_python.client import client as client_module -from hiero_sdk_python import ( - Client, - AccountId, - PrivateKey -) +from hiero_sdk_python import Client, AccountId, PrivateKey from hiero_sdk_python.hbar import Hbar +from hiero_sdk_python.node import _Node +from hiero_sdk_python.transaction.transaction_id import 
TransactionId + +pytestmark = pytest.mark.unit + @pytest.mark.parametrize( "factory_method, expected_network", @@ -27,62 +28,67 @@ def test_factory_basic_setup(factory_method, expected_network): """Test that factory methods return a Client with correct network and no operator.""" client = factory_method() - + assert isinstance(client, Client) assert client.network.network == expected_network assert client.operator_account_id is None assert client.operator_private_key is None - + client.close() + def test_for_testnet_then_set_operator(): """Test that we can manually set the operator on a factory client.""" client = Client.for_testnet() - + # Generate dummy credentials operator_id = AccountId(0, 0, 12345) operator_key = PrivateKey.generate_ed25519() - + client.set_operator(operator_id, operator_key) - + assert client.operator_account_id == operator_id assert client.operator_private_key.to_string() == operator_key.to_string() assert client.operator is not None - + client.close() + def test_for_mainnet_then_set_operator(): """Test that we can manually set the operator on a mainnet client.""" client = Client.for_mainnet() - + operator_id = AccountId(0, 0, 67890) operator_key = PrivateKey.generate_ecdsa() client.set_operator(operator_id, operator_key) - + assert client.operator_account_id == operator_id assert client.operator_private_key.to_string() == operator_key.to_string() - + client.close() + def test_from_env_missing_operator_id_raises_error(): """Test that from_env raises ValueError when OPERATOR_ID is missing.""" dummy_key = PrivateKey.generate_ed25519().to_string_der() - - with patch.object(client_module, 'load_dotenv'): + + with patch.object(client_module, "load_dotenv"): with patch.dict(os.environ, {"OPERATOR_KEY": dummy_key}, clear=True): with pytest.raises(ValueError) as exc_info: Client.from_env() assert "OPERATOR_ID" in str(exc_info.value) + def test_from_env_missing_operator_key_raises_error(): """Test that from_env raises ValueError when OPERATOR_KEY is 
missing.""" - with patch.object(client_module, 'load_dotenv'): + with patch.object(client_module, "load_dotenv"): with patch.dict(os.environ, {"OPERATOR_ID": "0.0.1234"}, clear=True): with pytest.raises(ValueError) as exc_info: Client.from_env() assert "OPERATOR_KEY" in str(exc_info.value) + def test_from_env_with_valid_credentials(): """Test that from_env creates client with valid environment variables.""" test_key = PrivateKey.generate_ed25519() @@ -93,13 +99,14 @@ def test_from_env_with_valid_credentials(): "OPERATOR_KEY": test_key_str, } - with patch.object(client_module, 'load_dotenv'): + with patch.object(client_module, "load_dotenv"): with patch.dict(os.environ, env_vars, clear=True): client = Client.from_env() assert isinstance(client, Client) assert client.operator_account_id == AccountId.from_string("0.0.1234") client.close() + def test_from_env_with_explicit_network_parameter(): """Test that from_env uses explicit network parameter over env var.""" test_key = PrivateKey.generate_ed25519() @@ -111,12 +118,13 @@ def test_from_env_with_explicit_network_parameter(): "NETWORK": "testnet", } - with patch.object(client_module, 'load_dotenv'): + with patch.object(client_module, "load_dotenv"): with patch.dict(os.environ, env_vars, clear=True): client = Client.from_env(network="mainnet") assert client.network.network == "mainnet" client.close() + def test_from_env_defaults_to_testnet(): """Test that from_env defaults to testnet when NETWORK not set.""" test_key = PrivateKey.generate_ed25519() @@ -127,12 +135,13 @@ def test_from_env_defaults_to_testnet(): "OPERATOR_KEY": test_key_str, } - with patch.object(client_module, 'load_dotenv'): + with patch.object(client_module, "load_dotenv"): with patch.dict(os.environ, env_vars, clear=True): client = Client.from_env() assert client.network.network == "testnet" client.close() + def test_from_env_uses_network_env_var(): """Test that from_env uses NETWORK env var when no argument is provided.""" test_key = 
PrivateKey.generate_ed25519() @@ -144,12 +153,13 @@ def test_from_env_uses_network_env_var(): "NETWORK": "previewnet", } - with patch.object(client_module, 'load_dotenv'): + with patch.object(client_module, "load_dotenv"): with patch.dict(os.environ, env_vars, clear=True): client = Client.from_env() assert client.network.network == "previewnet" client.close() + def test_from_env_with_invalid_network_name(): """Test that from_env raises error for invalid network name.""" test_key = PrivateKey.generate_ed25519() @@ -157,12 +167,13 @@ def test_from_env_with_invalid_network_name(): "OPERATOR_ID": "0.0.1234", "OPERATOR_KEY": test_key.to_string_der(), } - - with patch.object(client_module, 'load_dotenv'): + + with patch.object(client_module, "load_dotenv"): with patch.dict(os.environ, env_vars, clear=True): with pytest.raises(ValueError, match="Invalid network name"): Client.from_env(network="mars_network") + def test_from_env_with_malformed_operator_id(): """Test that from_env raises error for malformed OPERATOR_ID.""" test_key = PrivateKey.generate_ed25519() @@ -170,33 +181,35 @@ def test_from_env_with_malformed_operator_id(): "OPERATOR_ID": "not-an-account-id", "OPERATOR_KEY": test_key.to_string_der(), } - - with patch.object(client_module, 'load_dotenv'): + + with patch.object(client_module, "load_dotenv"): with patch.dict(os.environ, env_vars, clear=True): with pytest.raises(ValueError, match="Invalid account ID"): Client.from_env() + def test_from_env_with_malformed_operator_key(): """Test that from_env raises error for malformed OPERATOR_KEY.""" env_vars = { "OPERATOR_ID": "0.0.1234", "OPERATOR_KEY": "not-a-valid-key", } - - with patch.object(client_module, 'load_dotenv'): + + with patch.object(client_module, "load_dotenv"): with patch.dict(os.environ, env_vars, clear=True): with pytest.raises(ValueError): Client.from_env() + @pytest.mark.parametrize( - 'valid_amount,expected', + "valid_amount,expected", [ (1, Hbar(1)), (0.1, Hbar(0.1)), - (Decimal('0.1'), 
Hbar(Decimal('0.1'))), + (Decimal("0.1"), Hbar(Decimal("0.1"))), (Hbar(1), Hbar(1)), - (Hbar(0), Hbar(0)) - ] + (Hbar(0), Hbar(0)), + ], ) def test_set_default_max_query_payment_valid_param(valid_amount, expected): """Test that set_default_max_query_payment correctly converts various input types to Hbar.""" @@ -206,9 +219,9 @@ def test_set_default_max_query_payment_valid_param(valid_amount, expected): client.set_default_max_query_payment(valid_amount) assert client.default_max_query_payment == expected + @pytest.mark.parametrize( - 'negative_amount', - [-1, -0.1, Decimal('-0.1'), Decimal('-1'), Hbar(-1)] + "negative_amount", [-1, -0.1, Decimal("-0.1"), Decimal("-1"), Hbar(-1)] ) def test_set_default_max_query_payment_negative_value(negative_amount): """Test set_default_max_query_payment for negative amount values.""" @@ -217,27 +230,307 @@ def test_set_default_max_query_payment_negative_value(negative_amount): with pytest.raises(ValueError, match="max_query_payment must be non-negative"): client.set_default_max_query_payment(negative_amount) -@pytest.mark.parametrize( - 'invalid_amount', - ['1', 'abc', True, False, None, object()] -) + +@pytest.mark.parametrize("invalid_amount", ["1", "abc", True, False, None, object()]) def test_set_default_max_query_payment_invalid_param(invalid_amount): """Test that set_default_max_query_payment raise error for invalid param.""" client = Client.for_testnet() - with pytest.raises(TypeError, match=( - "max_query_payment must be int, float, Decimal, or Hbar, " - f"got {type(invalid_amount).__name__}" - )): + with pytest.raises( + TypeError, + match=( + "max_query_payment must be int, float, Decimal, or Hbar, " + f"got {type(invalid_amount).__name__}" + ), + ): client.set_default_max_query_payment(invalid_amount) -@pytest.mark.parametrize( - 'invalid_amount', - [float('inf'), float('nan')] -) + +@pytest.mark.parametrize("invalid_amount", [float("inf"), float("nan")]) def 
test_set_default_max_query_payment_non_finite_value(invalid_amount): """Test that set_default_max_query_payment raise error for non finite value.""" client = Client.for_testnet() with pytest.raises(ValueError, match="Hbar amount must be finite"): client.set_default_max_query_payment(invalid_amount) + + +# Set max_attempts +def test_set_max_attempts_with_valid_param(): + """Test that set_max_attempts updates default max_attempts value for client.""" + client = Client.for_testnet() + assert client.max_attempts == 10 # default max_attempt 10 + + returned = client.set_max_attempts(20) + assert client.max_attempts == 20 + assert returned is client + + client.close() + + +@pytest.mark.parametrize("invalid_max_attempts", ["1", 0.2, True, False, object(), {}]) +def test_set_max_attempts_with_invalid_type(invalid_max_attempts): + """Test that set_max_attempts raises TypeError for non-int values.""" + client = Client.for_testnet() + + with pytest.raises( + TypeError, + match=f"max_attempts must be of type int, got {type(invalid_max_attempts).__name__}", + ): + client.set_max_attempts(invalid_max_attempts) + + +@pytest.mark.parametrize("invalid_max_attempts", [0, -10]) +def test_set_max_attempts_with_invalid_value(invalid_max_attempts): + """Test that set_max_attempts raises ValueError for non-positive values.""" + client = Client.for_testnet() + + with pytest.raises(ValueError, match="max_attempts must be greater than 0"): + client.set_max_attempts(invalid_max_attempts) + + +# Set grpc_deadline +def test_set_grpc_deadline_with_valid_param(): + """Test that set_grpc_deadline updates default value of _grpc_deadline.""" + client = Client.for_testnet() + assert client._grpc_deadline == 10 # default grpc_deadline 10 sec + + returned = client.set_grpc_deadline(20) + assert client._grpc_deadline == 20 + assert returned is client + + client.close() + + +@pytest.mark.parametrize("invalid_grpc_deadline", ["1", True, False, object(), {}]) +def 
test_set_grpc_deadline_with_invalid_type(invalid_grpc_deadline): + """Test that set_grpc_deadline raises TypeError for invalid types.""" + client = Client.for_testnet() + + with pytest.raises( + TypeError, + match=f"grpc_deadline must be of type Union\\[int, float\\], got {type(invalid_grpc_deadline).__name__}", + ): + client.set_grpc_deadline(invalid_grpc_deadline) + + +@pytest.mark.parametrize( + "invalid_grpc_deadline", [0, -10, 0.0, -2.3, float("inf"), float("nan")] +) +def test_set_grpc_deadline_with_invalid_value(invalid_grpc_deadline): + """Test that set_grpc_deadline raises ValueError for non-positive values.""" + client = Client.for_testnet() + + with pytest.raises( + ValueError, match="grpc_deadline must be a finite value greater than 0" + ): + client.set_grpc_deadline(invalid_grpc_deadline) + + +# Set request_timeout +def test_set_request_timeout_with_valid_param(): + """Test that set_request_timeout updates default value of _request_timeout.""" + client = Client.for_testnet() + assert client._request_timeout == 120 # default request_timeout 120 sec + + returned = client.set_request_timeout(200) + assert client._request_timeout == 200 + assert returned is client + + client.close() + + +@pytest.mark.parametrize("invalid_request_timeout", ["1", True, False, object(), {}]) +def test_set_request_timeout_with_invalid_type(invalid_request_timeout): + """Test that set_request_timeout raises TypeError for invalid types.""" + client = Client.for_testnet() + + with pytest.raises( + TypeError, + match=f"request_timeout must be of type Union\\[int, float\\], got {type(invalid_request_timeout).__name__}", + ): + client.set_request_timeout(invalid_request_timeout) + + +@pytest.mark.parametrize( + "invalid_request_timeout", [0, -10, 0.0, -2.3, float("inf"), float("nan")] +) +def test_set_request_timeout_with_invalid_value(invalid_request_timeout): + """Test that set_request_timeout raises ValueError for non-positive values.""" + client = Client.for_testnet() + + with 
pytest.raises( + ValueError, match="request_timeout must be a finite value greater than 0" + ): + client.set_request_timeout(invalid_request_timeout) + + +# Set min_backoff +def test_set_min_backoff_with_valid_param(): + """Test that set_min_backoff updates default value of _min_backoff.""" + client = Client.for_testnet() + assert client._min_backoff == 0.25 # default min_backoff = 0.25 sec + + returned = client.set_min_backoff(2) + assert client._min_backoff == 2 + assert returned is client + + client.close() + + +@pytest.mark.parametrize("invalid_min_backoff", ["1", True, False, object(), {}]) +def test_set_min_backoff_with_invalid_type(invalid_min_backoff): + """Test that set_min_backoff raises TypeError for invalid types.""" + client = Client.for_testnet() + + with pytest.raises( + TypeError, + match=f"min_backoff must be of type int or float, got {type(invalid_min_backoff).__name__}", + ): + client.set_min_backoff(invalid_min_backoff) + + +@pytest.mark.parametrize( + "invalid_min_backoff", [-1, -10, float("inf"), float("-inf"), float("nan")] +) +def test_set_min_backoff_with_invalid_value(invalid_min_backoff): + """Test that set_min_backoff raises ValueError for invalid values.""" + client = Client.for_testnet() + + with pytest.raises(ValueError, match="min_backoff must be a finite value >= 0"): + client.set_min_backoff(invalid_min_backoff) + + +def test_set_min_backoff_exceeds_max_backoff(): + """Test that set_min_backoff raises ValueError if it exceeds max_backoff.""" + client = Client.for_testnet() + client.set_max_backoff(5) + + with pytest.raises(ValueError, match="min_backoff cannot exceed max_backoff"): + client.set_min_backoff(10) + + +# Set max_backoff +def test_set_max_backoff_with_valid_param(): + """Test that set_max_backoff updates default value of _max_backoff.""" + client = Client.for_testnet() + assert client._max_backoff == 8 # default max_backoff = 8 sec + + returned = client.set_max_backoff(20) + assert client._max_backoff == 20 + assert 
returned is client + + client.close() + + +@pytest.mark.parametrize("invalid_max_backoff", ["1", True, False, object(), {}]) +def test_set_max_backoff_with_invalid_type(invalid_max_backoff): + """Test that set_max_backoff raises TypeError for invalid types.""" + client = Client.for_testnet() + + with pytest.raises( + TypeError, + match=f"max_backoff must be of type int or float, got {type(invalid_max_backoff).__name__}", + ): + client.set_max_backoff(invalid_max_backoff) + + +@pytest.mark.parametrize( + "invalid_max_backoff", [-1, -10, float("inf"), float("-inf"), float("nan")] +) +def test_set_max_backoff_with_invalid_value(invalid_max_backoff): + """Test that set_max_backoff raises ValueError for invalid values.""" + client = Client.for_testnet() + + with pytest.raises(ValueError, match="max_backoff must be a finite value >= 0"): + client.set_max_backoff(invalid_max_backoff) + + +def test_set_max_backoff_less_than_min_backoff(): + """Test that set_max_backoff raises ValueError if it is less than min_backoff.""" + client = Client.for_testnet() + client.set_min_backoff(5) + + with pytest.raises(ValueError, match="max_backoff cannot be less than min_backoff"): + returned = client.set_max_backoff(2) + assert returned is client + + +# Test update_network +def test_update_network_refreshes_nodes_and_returns_self(): + """Test that update_network refreshes network nodes and returns the client.""" + client = Client.for_testnet() + + with patch.object(client.network, "_set_network_nodes") as mock_set_nodes: + returned = client.update_network() + + mock_set_nodes.assert_called_once() + assert returned is client + + client.close() + +def test_warning_when_grpc_deadline_exceeds_request_timeout(): + """Warn when grpc_deadline is greater than request_timeout.""" + client = Client.for_testnet() + client.set_request_timeout(2) + + with pytest.warns(UserWarning): + client.set_grpc_deadline(7) + + +def test_warning_when_request_timeout_less_than_grpc_deadline(): + """Warn when 
request_timeout is less than grpc_deadline.""" + client = Client.for_testnet() + client.set_grpc_deadline(7) + + with pytest.warns(UserWarning): + client.set_request_timeout(2) + + +def test_generate_transaction_id_requires_operator_set(): + """Test that generate_transaction_id raises ValueError if operator_account_id is not set.""" + client = Client.for_testnet() + client.operator_account_id = None # ensure not set + + with pytest.raises(ValueError, match="Operator account ID must be set"): + client.generate_transaction_id() + + client.close() + + +def test_generate_transaction_id_returns_transaction_id(monkeypatch): + """Test that generate_transaction_id returns a TransactionId object when operator is set.""" + client = Client.for_testnet() + client.operator_account_id = AccountId(0, 0, 1234) + + txid = client.generate_transaction_id() + assert isinstance(txid, TransactionId) + assert txid.account_id == client.operator_account_id + + client.close() + + +def test_get_node_account_ids_returns_correct_list(): + """Test that get_node_account_ids returns a list of node AccountIds.""" + client = Client.for_testnet() + + # Some nodes with _account_id attributes + node1 = _Node(AccountId(0, 0, 101), "127.0.0.1:50211", None) + node2 = _Node(AccountId(0, 0, 102), "127.0.0.1:50212", None) + client.network.nodes = [node1, node2] + + node_ids = client.get_node_account_ids() + assert node_ids == [node1._account_id, node2._account_id] + + client.close() + + +def test_get_node_account_ids_raises_when_no_nodes(): + """Test that get_node_account_ids raises ValueError if no nodes are configured.""" + client = Client.for_testnet() + client.network.nodes = [] + + with pytest.raises(ValueError, match="No nodes available"): + client.get_node_account_ids() + + client.close() diff --git a/tests/unit/conftest.py b/tests/unit/conftest.py index 96fc6674b..edbbfc897 100644 --- a/tests/unit/conftest.py +++ b/tests/unit/conftest.py @@ -25,6 +25,7 @@ FAKE_CERT_HASH = 
hashlib.sha384(FAKE_CERT_PEM).hexdigest().encode("utf-8") + @pytest.fixture def mock_account_ids(): """Fixture to provide mock account IDs and token IDs.""" @@ -33,33 +34,45 @@ def mock_account_ids(): node_account_id = AccountId(0, 0, 3) token_id_1 = TokenId(1, 1, 1) token_id_2 = TokenId(2, 2, 2) - return account_id_sender, account_id_recipient, node_account_id, token_id_1, token_id_2 + return ( + account_id_sender, + account_id_recipient, + node_account_id, + token_id_1, + token_id_2, + ) + @pytest.fixture def amount(): """Fixture to provide a default amount for fungible tokens.""" return 1000 + @pytest.fixture def metadata(): """Fixture to provide mock metadata for NFTs.""" return [b'a'] + @pytest.fixture def transaction_id(): """Fixture that generates a transaction ID for testing.""" return TransactionId.generate(AccountId(0, 0, 1234)) + @pytest.fixture def private_key(): """Fixture to generate a private key for testing.""" return PrivateKey.generate() + @pytest.fixture def topic_id(): """Fixture to create a topic ID for testing.""" return TopicId(0, 0, 1234) + @pytest.fixture def nft_id(): """Fixture to provide a mock NftId instance.""" @@ -67,21 +80,25 @@ def nft_id(): serial_number = 8 return NftId(token_id=token_id, serial_number=serial_number) + @pytest.fixture def token_id(): """Fixture to provide a mock TokenId instance.""" return TokenId(shard=0, realm=0, num=3) + @pytest.fixture def file_id(): """Fixture to provide a mock FileId instance.""" return FileId(shard=0, realm=0, file=2) + @pytest.fixture def contract_id(): """Fixture to provide a mock ContractId instance.""" return ContractId(shard=0, realm=0, contract=1) + @pytest.fixture def mock_client(): """Fixture to provide a mock client with hardcoded nodes for testing purposes.""" @@ -89,10 +106,10 @@ def mock_client(): node = _Node( AccountId(0, 0, 3), "node1.example.com:50211", - address_book=NodeAddress(cert_hash=FAKE_CERT_HASH, addresses=[]) + address_book=NodeAddress(cert_hash=FAKE_CERT_HASH, 
addresses=[]), ) node._fetch_server_certificate_pem = lambda: FAKE_CERT_PEM - + nodes = [node] network = Network(nodes=nodes) diff --git a/tests/unit/contract_call_query_test.py b/tests/unit/contract_call_query_test.py index 7d43cf82a..4fd03cb60 100644 --- a/tests/unit/contract_call_query_test.py +++ b/tests/unit/contract_call_query_test.py @@ -17,7 +17,8 @@ from hiero_sdk_python.hapi.services import ( contract_types_pb2, response_header_pb2, - response_pb2, contract_call_local_pb2 + response_pb2, + contract_call_local_pb2, ) from hiero_sdk_python.hapi.services.query_header_pb2 import ResponseType from hiero_sdk_python.response_code import ResponseCode diff --git a/tests/unit/contract_create_transaction_test.py b/tests/unit/contract_create_transaction_test.py index 80088cf7e..578229dbf 100644 --- a/tests/unit/contract_create_transaction_test.py +++ b/tests/unit/contract_create_transaction_test.py @@ -245,6 +245,7 @@ def test_build_scheduled_body(mock_account_ids, contract_params): ) assert schedulable_body.contractCreateInstance.initcode == b"" + def test_build_transaction_body_validation_errors(): """Test that build_transaction_body raises appropriate validation errors.""" # Test missing bytecode_file_id and bytecode diff --git a/tests/unit/contract_execute_transaction_test.py b/tests/unit/contract_execute_transaction_test.py index e5bf93470..f2abcf429 100644 --- a/tests/unit/contract_execute_transaction_test.py +++ b/tests/unit/contract_execute_transaction_test.py @@ -95,6 +95,7 @@ def test_build_transaction_body_with_valid_parameters(mock_account_ids, execute_ == execute_params["function_parameters"] ) + def test_build_scheduled_body_with_valid_parameters(mock_account_ids, execute_params): """Test building a schedulable contract execute transaction body with valid parameters.""" operator_id, _, node_account_id, _, _ = mock_account_ids diff --git a/tests/unit/contract_id_test.py b/tests/unit/contract_id_test.py index b765f808d..67cec8a33 100644 --- 
a/tests/unit/contract_id_test.py +++ b/tests/unit/contract_id_test.py @@ -2,6 +2,8 @@ Unit tests for the ContractId class. """ +import struct +from unittest.mock import patch import pytest from hiero_sdk_python.contract.contract_id import ContractId @@ -12,9 +14,11 @@ @pytest.fixture def client(mock_client): - mock_client.network.ledger_id = bytes.fromhex("00") # mainnet ledger id + mock_client.network.ledger_id = bytes.fromhex("00") # mainnet ledger id return mock_client + + def test_default_initialization(): """Test ContractId initialization with default values.""" contract_id = ContractId() @@ -55,71 +59,67 @@ def test_str_representation_default(): assert contract_id.checksum is None -def test_from_string_valid(): - """Test creating ContractId from valid string format.""" - contract_id = ContractId.from_string("1.2.3") - - assert contract_id.shard == 1 - assert contract_id.realm == 2 - assert contract_id.contract == 3 - assert contract_id.evm_address is None - assert contract_id.checksum is None - - -def test_from_string_zeros(): - """Test creating ContractId from string with zero values.""" - contract_id = ContractId.from_string("0.0.0") - - assert contract_id.shard == 0 - assert contract_id.realm == 0 - assert contract_id.contract == 0 - assert contract_id.evm_address is None - assert contract_id.checksum is None - -def test_from_string_valid_with_checksum(): +@pytest.mark.parametrize( + "contract_str, expected", + [ + ("1.2.101", (1, 2, 101, None, None)), + ("0.0.0", (0, 0, 0, None, None)), + ("1.2.3-abcde", (1, 2, 3, None, "abcde")), + ( + "1.2.abcdef0123456789abcdef0123456789abcdef01", + (1, 2, 0, bytes.fromhex("abcdef0123456789abcdef0123456789abcdef01"), None), + ), + ], +) +def test_from_string_for_valid_str(contract_str, expected): """Test creating ContractId from valid string format.""" - contract_id = ContractId.from_string("1.2.3-abcde") + shard, realm, contract, evm_address, checksum = expected - assert contract_id.shard == 1 - assert 
contract_id.realm == 2 - assert contract_id.contract == 3 - assert contract_id.evm_address is None - assert contract_id.checksum == "abcde" + contract_id = ContractId.from_string(contract_str) -def test_from_string_with_evm_address(): - """Test creating ContractId from valid string format with evm_address.""" - contract_id = ContractId.from_string("1.2.abcdef0123456789abcdef0123456789abcdef01") - assert contract_id.shard == 1 - assert contract_id.realm == 2 - assert contract_id.contract == 0 - assert contract_id.evm_address == bytes.fromhex("abcdef0123456789abcdef0123456789abcdef01") - assert contract_id.checksum is None + assert isinstance(contract_id, ContractId) + assert contract_id.shard == shard + assert contract_id.realm == realm + assert contract_id.contract == contract + assert contract_id.evm_address == evm_address + assert contract_id.checksum == checksum @pytest.mark.parametrize( - 'invalid_id', + "invalid_id", [ - '1.2', # Too few parts - '1.2.3.4', # Too many parts - 'a.b.c', # Non-numeric parts - '', # Empty string - '1.a.3', # Partial numeric - 123, - None, - '0.0.-1', - 'abc.def.ghi', - '0.0.1-ad', - '0.0.1-addefgh', - '0.0.1 - abcde', - ' 0.0.100 ', - ' 1.2.abcdef0123456789abcdef0123456789abcdef01 ' - '1.2.001122334455667788990011223344556677' - ] + "1.2", # Too few parts + "1.2.3.4", # Too many parts + "a.b.c", # Non-numeric parts + "", # Empty string + "1.a.3", # Partial numeric + "0.0.-1", + "abc.def.ghi", + "0.0.1-ad", + "0.0.1-addefgh", + "0.0.1 - abcde", + " 0.0.100 ", + " 1.2.abcdef0123456789abcdef0123456789abcdef01 ", + "1.2.0xabcdef0123456789abcdef0123456789abcdef01", + "1.2.001122334455667788990011223344556677", + "1.2.000000000000000000000000000000000000000000", + ], ) def test_from_string_for_invalid_format(invalid_id): """Should raise error when creating ContractId from invalid string input.""" with pytest.raises( - ValueError, match=f"Invalid contract ID string '{invalid_id}'. Expected format 'shard.realm.contract'." 
+ ValueError, + match=f"Invalid contract ID string '{invalid_id}'. Expected format 'shard.realm.contract'.", + ): + ContractId.from_string(invalid_id) + + +@pytest.mark.parametrize("invalid_id", [None, 123, True, object, {}]) +def test_from_string_for_invalid_type(invalid_id): + """Should raise error when creating ContractId from invalid input type.""" + with pytest.raises( + TypeError, + match=f"contract_id_str must be of type str, got {type(invalid_id).__name__}", ): ContractId.from_string(invalid_id) @@ -152,9 +152,29 @@ def test_from_proto(): contract_id = ContractId._from_proto(proto) + assert isinstance(contract_id, ContractId) assert contract_id.shard == 1 assert contract_id.realm == 2 assert contract_id.contract == 3 + assert contract_id.evm_address is None + + +def test_from_proto_with_evm_address(): + """Test creating ContractId from protobuf with EVM address set.""" + evm_address = bytes.fromhex("abcdef0123456789abcdef0123456789abcdef01") + proto = basic_types_pb2.ContractID( + shardNum=1, + realmNum=2, + evm_address=evm_address, + ) + + contract_id = ContractId._from_proto(proto) + + assert isinstance(contract_id, ContractId) + assert contract_id.shard == 1 + assert contract_id.realm == 2 + assert contract_id.contract == 0 + assert contract_id.evm_address == evm_address def test_from_proto_zero_values(): @@ -163,9 +183,11 @@ def test_from_proto_zero_values(): contract_id = ContractId._from_proto(proto) + assert isinstance(contract_id, ContractId) assert contract_id.shard == 0 assert contract_id.realm == 0 assert contract_id.contract == 0 + assert contract_id.evm_address is None def test_roundtrip_proto_conversion(): @@ -271,6 +293,8 @@ def test_evm_address_hash(): # Different EVM addresses should have different hashes assert hash(contract_id1) != hash(contract_id3) + + def test_to_evm_address(): """Test ContractId.to_evm_address() for both explicit and computed EVM addresses.""" # Explicit EVM address @@ -280,40 +304,46 @@ def test_to_evm_address(): # 
Computed EVM address (no explicit evm_address) contract_id = ContractId(shard=1, realm=2, contract=3) - # [4 bytes shard][8 bytes realm][8 bytes contract], all big-endian - expected_bytes = ( - (0).to_bytes(4, "big") + - (0).to_bytes(8, "big") + - (3).to_bytes(8, "big") + expected_bytes = struct.pack( + ">iqq", contract_id.shard, contract_id.realm, contract_id.contract ) assert contract_id.to_evm_address() == expected_bytes.hex() # Default values contract_id = ContractId() - expected_bytes = ( - (0).to_bytes(4, "big") + - (0).to_bytes(8, "big") + - (0).to_bytes(8, "big") + expected_bytes = struct.pack( + ">iqq", contract_id.shard, contract_id.realm, contract_id.contract ) assert contract_id.to_evm_address() == expected_bytes.hex() -def test_str_representaion_with_checksum(client): + + +def test_str_representation_with_checksum(client): """Should return string representation with checksum""" contract_id = ContractId.from_string("0.0.1") assert contract_id.to_string_with_checksum(client) == "0.0.1-dfkxr" -def test_str_representaion_checksum_with_evm_address(client): + + +def test_str_representation_checksum_with_evm_address(client): """Should raise error on to_string_with_checksum is called when evm_address is set""" contract_id = ContractId.from_string("0.0.abcdef0123456789abcdef0123456789abcdef01") - with pytest.raises(ValueError, match="to_string_with_checksum cannot be applied to ContractId with evm_address"): + with pytest.raises( + ValueError, + match="to_string_with_checksum cannot be applied to ContractId with evm_address", + ): contract_id.to_string_with_checksum(client) + + def test_validate_checksum_success(client): """Should pass checksum validation when checksum is correct.""" contract_id = ContractId.from_string("0.0.1-dfkxr") contract_id.validate_checksum(client) + + def test_validate_checksum_failure(client): """Should raise ValueError if checksum validation fails.""" contract_id = ContractId.from_string("0.0.1-wronx") @@ -321,7 +351,220 @@ def 
test_validate_checksum_failure(client): with pytest.raises(ValueError, match="Checksum mismatch for 0.0.1"): contract_id.validate_checksum(client) -def test_str_representaion__with_evm_address(): - """Should return str represention with evm_address""" + +def test_str_representation_with_evm_address(): + """Should return str representing with evm_address""" contract_id = ContractId.from_string("0.0.abcdef0123456789abcdef0123456789abcdef01") assert contract_id.__str__() == "0.0.abcdef0123456789abcdef0123456789abcdef01" + +def test_contract_id_repr_numeric(): + """Test __repr__ output for numeric contract ID.""" + contract_id = ContractId(0, 0, 12345) + expected = "ContractId(shard=0, realm=0, contract=12345)" + assert repr(contract_id) == expected + +def test_contract_id_repr_evm_address(): + """Test __repr__ output for EVM-based contract ID.""" + evm_bytes = bytes.fromhex("a" * 40) + contract_id = ContractId(1, 2, evm_address=evm_bytes) + expected = f"ContractId(shard=1, realm=2, evm_address={evm_bytes.hex()})" + assert repr(contract_id) == expected + + +def test_to_string_with_checksum_missing_ledger_id(mock_client): + """Should raise error if client has no ledger ID.""" + mock_client.network.ledger_id = None + contract_id = ContractId.from_string("0.0.1") + + with pytest.raises(ValueError, match="Missing ledger ID"): + contract_id.to_string_with_checksum(mock_client) + + +@pytest.mark.parametrize( + "evm_address_str, expected", + [ + ( + "abcdef0123456789abcdef0123456789abcdef01", + (0, 0, 0, bytes.fromhex("abcdef0123456789abcdef0123456789abcdef01")), + ), + ( + "0xabcdef0123456789abcdef0123456789abcdef01", + (0, 0, 0, bytes.fromhex("abcdef0123456789abcdef0123456789abcdef01")), + ), + ], +) +def test_from_evm_address_valid_params(evm_address_str, expected): + """Test from_evm_address with valid EVM address strings.""" + shard, realm, contract, evm_address = expected + + contract_id = ContractId.from_evm_address(0, 0, evm_address_str) + + assert 
isinstance(contract_id, ContractId) + assert contract_id.shard == shard + assert contract_id.realm == realm + assert contract_id.contract == contract + assert contract_id.evm_address == evm_address + assert contract_id.checksum is None + + +@pytest.mark.parametrize( + "invalid_address", + [ + "abcdef0123456789abcdef0123456789abcdef", # less than 20 bytes + "abcdef0123456789abcdef0123456789abcdef1010101", # greater than 20 bytes + "abcd-123sjd", # invalid format + "0xZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ", # invalid hex + ], +) +def test_from_evm_address_invalid_evm_address_str(invalid_address): + """Test from_evm_address raise error for invalid EVM address strings.""" + with pytest.raises(ValueError, match=f"Invalid EVM address: {invalid_address}"): + ContractId.from_evm_address(0, 0, invalid_address) + + +@pytest.mark.parametrize( + "invalid_address", + [None, 1234, True, object, {}], +) +def test_from_evm_address_invalid_evm_address_type(invalid_address): + """Test from_evm_address raise error for non-string EVM address inputs.""" + with pytest.raises( + TypeError, + match=f"evm_address must be of type str, got {type(invalid_address).__name__}", + ): + ContractId.from_evm_address(0, 0, invalid_address) + + +@pytest.mark.parametrize( + "invalid_shard", + [None, "123", True, object, {}], +) +def test_from_evm_address_invalid_shard_type(invalid_shard): + """Test from_evm_address raise error for invalid shard types.""" + with pytest.raises( + TypeError, match=f"shard must be int, got {type(invalid_shard).__name__}" + ): + ContractId.from_evm_address( + invalid_shard, 0, "abcdef0123456789abcdef0123456789abcdef01" + ) + + +def test_from_evm_address_negative_shard_value(): + """Test from_evm_address raise error for negative shard values.""" + with pytest.raises(ValueError, match="shard must be a non-negative integer"): + ContractId.from_evm_address(-1, 0, "abcdef0123456789abcdef0123456789abcdef01") + + +@pytest.mark.parametrize( + "invalid_realm", + [None, "123", 
True, object, {}], +) +def test_from_evm_address_invalid_realm_type(invalid_realm): + """Test from_evm_address raise error for invalid realm types.""" + with pytest.raises( + TypeError, match=f"realm must be int, got {type(invalid_realm).__name__}" + ): + ContractId.from_evm_address( + 0, invalid_realm, "abcdef0123456789abcdef0123456789abcdef01" + ) + + +def test_from_evm_address_negative_realm_value(): + """Test from_evm_address raise error for negative realm values.""" + with pytest.raises(ValueError, match="realm must be a non-negative integer"): + ContractId.from_evm_address(0, -1, "abcdef0123456789abcdef0123456789abcdef01") + + +def test_from_bytes_success(): + """Should deserialize ContractId correctly from protobuf bytes.""" + original = ContractId(shard=1, realm=2, contract=3) + data = original.to_bytes() + + reconstructed = ContractId.from_bytes(data) + + assert reconstructed == original + + +@pytest.mark.parametrize("invalid_data", [None, "abc", 123, object()]) +def test_from_bytes_invalid_type(invalid_data): + """Should raise TypeError when from_bytes receives non-bytes input.""" + with pytest.raises(TypeError, match="data must be bytes"): + ContractId.from_bytes(invalid_data) + + +def test_from_bytes_invalid_payload(): + """Should raise ValueError when protobuf deserialization fails.""" + with pytest.raises(ValueError, match="Failed to deserialize ContractId from bytes"): + ContractId.from_bytes(b"\x00\x01\x02") + + +def test_populate_contract_num_success(client): + """Should populate contract number using mirror node response.""" + evm_address = bytes.fromhex("abcdef0123456789abcdef0123456789abcdef01") + contract_id = ContractId(shard=0, realm=0, evm_address=evm_address) + + with patch( + "hiero_sdk_python.contract.contract_id.perform_query_to_mirror_node", + return_value={"contract_id": "0.0.123"}, + ): + populated = contract_id.populate_contract_num(client) + + assert populated.contract == 123 + assert populated.evm_address == evm_address + + +def 
test_populate_contract_num_invalid_response(client): + """Should raise error when populating contract number invalid response.""" + evm_address = bytes.fromhex("abcdef0123456789abcdef0123456789abcdef01") + contract_id = ContractId(shard=0, realm=0, evm_address=evm_address) + + with patch( + "hiero_sdk_python.contract.contract_id.perform_query_to_mirror_node", + return_value={"contract_id": "invalid.account.format"}, + ): + with pytest.raises( + ValueError, + match="Invalid contract_id format received: invalid.account.format", + ): + contract_id.populate_contract_num(client) + + +def test_populate_contract_num_query_fails(client): + """Should raise error when populating contract number query fails.""" + evm_address = bytes.fromhex("abcdef0123456789abcdef0123456789abcdef01") + contract_id = ContractId(shard=0, realm=0, evm_address=evm_address) + + with patch( + "hiero_sdk_python.contract.contract_id.perform_query_to_mirror_node", + side_effect=RuntimeError("mirror node query error"), + ): + with pytest.raises( + RuntimeError, + match="Failed to populate contract num from mirror node for evm_address abcdef0123456789abcdef0123456789abcdef01", + ): + contract_id.populate_contract_num(client) + + +def test_populate_contract_num_without_evm_address(client): + """Should raise error when populate_contract_num is called without evm_address.""" + contract_id = ContractId(shard=0, realm=0, contract=1) + + with pytest.raises( + ValueError, match="evm_address is required to populate the contract number" + ): + contract_id.populate_contract_num(client) + + +def test_populate_contract_num_invalid_mirror_response(client): + """Should raise error if mirror node response is missing contract_id.""" + evm_address = bytes.fromhex("abcdef0123456789abcdef0123456789abcdef01") + contract_id = ContractId(shard=0, realm=0, evm_address=evm_address) + + with patch( + "hiero_sdk_python.contract.contract_id.perform_query_to_mirror_node", + return_value={}, + ): + with pytest.raises( + ValueError, 
match="Mirror node response missing 'contract_id'" + ): + contract_id.populate_contract_num(client) diff --git a/tests/unit/contract_info_test.py b/tests/unit/contract_info_test.py index 013781fdf..f7bc35829 100644 --- a/tests/unit/contract_info_test.py +++ b/tests/unit/contract_info_test.py @@ -292,6 +292,7 @@ def test_proto_conversion_multiple_token_relationships(multiple_token_relationsh assert second.token_id == TokenId(0, 0, 600) assert second.symbol == "TEST2" + def test_proto_conversion_minimal_fields(): """Test proto conversion with minimal fields""" contract_info = ContractInfo( diff --git a/tests/unit/contract_update_transaction_test.py b/tests/unit/contract_update_transaction_test.py index ab62132b6..05f128b88 100644 --- a/tests/unit/contract_update_transaction_test.py +++ b/tests/unit/contract_update_transaction_test.py @@ -280,6 +280,7 @@ def test_build_transaction_body_with_all_parameters( # Verify other fields are present (the actual protobuf structure may vary) assert transaction_body.contractUpdateInstance.HasField("contractID") + def test_build_scheduled_body_with_all_parameters( update_params, mock_account_ids, transaction_id ): diff --git a/tests/unit/custom_fee_test.py b/tests/unit/custom_fee_test.py index 59639b2c2..9127bf6da 100644 --- a/tests/unit/custom_fee_test.py +++ b/tests/unit/custom_fee_test.py @@ -10,9 +10,9 @@ from hiero_sdk_python.account.account_id import AccountId from hiero_sdk_python.tokens.token_id import TokenId - pytestmark = pytest.mark.unit + def test_custom_fixed_fee_proto_round_trip(): """Ensure CustomFixedFee protobuf serialization and deserialization behave correctly.""" fee = CustomFixedFee( @@ -31,6 +31,7 @@ def test_custom_fixed_fee_proto_round_trip(): assert new_fee.fee_collector_account_id == AccountId(0, 0, 456) assert new_fee.all_collectors_are_exempt is True + def test_custom_fixed_fee_str(): """Test the string representation of CustomFixedFee.""" fee = CustomFixedFee( @@ -108,6 +109,7 @@ def 
test_custom_fractional_fee_str(): assert "0.0.789" in kv["Fee Collector Account Id"] assert kv["All Collectors Are Exempt"] in ("False", "false") + def test_custom_fractional_fee(): fee = CustomFractionalFee( numerator=1, @@ -120,7 +122,9 @@ def test_custom_fractional_fee(): ) proto = fee._to_proto() # Changed from _to_protobuf - new_fee = CustomFractionalFee._from_proto(proto) # Changed from CustomFee._from_protobuf + new_fee = CustomFractionalFee._from_proto( + proto + ) # Changed from CustomFee._from_protobuf assert isinstance(new_fee, CustomFractionalFee) assert new_fee.numerator == 1 @@ -131,6 +135,7 @@ def test_custom_fractional_fee(): assert new_fee.fee_collector_account_id == AccountId(0, 0, 456) assert new_fee.all_collectors_are_exempt is False + def test_custom_royalty_fee(): fallback_fee = CustomFixedFee( amount=50, @@ -145,7 +150,9 @@ def test_custom_royalty_fee(): ) proto = fee._to_proto() # Changed from _to_protobuf - new_fee = CustomRoyaltyFee._from_proto(proto) # Changed from CustomFee._from_protobuf + new_fee = CustomRoyaltyFee._from_proto( + proto + ) # Changed from CustomFee._from_protobuf assert isinstance(new_fee, CustomRoyaltyFee) assert new_fee.numerator == 5 @@ -156,18 +163,20 @@ def test_custom_royalty_fee(): assert new_fee.fallback_fee.amount == 50 assert new_fee.fallback_fee.denominating_token_id == TokenId(0, 0, 789) + @pytest.mark.parametrize( - "custom_royalty_fee, expected_str", - [ - ( - CustomRoyaltyFee( - numerator=3, - denominator=20, - fallback_fee=None, - fee_collector_account_id=None, - all_collectors_are_exempt=True, - ), - "\n".join([ + "custom_royalty_fee, expected_str", + [ + ( + CustomRoyaltyFee( + numerator=3, + denominator=20, + fallback_fee=None, + fee_collector_account_id=None, + all_collectors_are_exempt=True, + ), + "\n".join( + [ "CustomRoyaltyFee:", " Numerator = 3", " Denominator = 20", @@ -175,20 +184,22 @@ def test_custom_royalty_fee(): " Fallback Fee Denominating Token ID = None", " Fee Collector Account ID = 
None", " All Collectors Are Exempt = True", - ]) + ] ), - ( - CustomRoyaltyFee( - numerator=7, - denominator=100, - fallback_fee=CustomFixedFee( - amount=30, - denominating_token_id=TokenId(0, 0, 123), - ), - fee_collector_account_id=AccountId(0, 0, 456), - all_collectors_are_exempt=False, + ), + ( + CustomRoyaltyFee( + numerator=7, + denominator=100, + fallback_fee=CustomFixedFee( + amount=30, + denominating_token_id=TokenId(0, 0, 123), ), - "\n".join([ + fee_collector_account_id=AccountId(0, 0, 456), + all_collectors_are_exempt=False, + ), + "\n".join( + [ "CustomRoyaltyFee:", " Numerator = 7", " Denominator = 100", @@ -196,19 +207,24 @@ def test_custom_royalty_fee(): " Fallback Fee Denominating Token ID = 0.0.123", " Fee Collector Account ID = 0.0.456", " All Collectors Are Exempt = False", - ]) - ) - ] + ] + ), + ), + ], ) -def test_custom_royalty_fee_str(custom_royalty_fee: CustomRoyaltyFee, expected_str: str): +def test_custom_royalty_fee_str( + custom_royalty_fee: CustomRoyaltyFee, expected_str: str +): """Test the string representation of CustomRoyaltyFee.""" fee_str = str(custom_royalty_fee) assert fee_str == expected_str + class DummyCustomFee(CustomFee): def _to_proto(self): return "dummy-proto" + def test_custom_fee_init_and_setters(): fee = DummyCustomFee() assert fee.fee_collector_account_id is None @@ -221,6 +237,7 @@ def test_custom_fee_init_and_setters(): fee.set_all_collectors_are_exempt(True) assert fee.all_collectors_are_exempt is True + def test_custom_fee_equality(): fee1 = DummyCustomFee() fee2 = DummyCustomFee() @@ -229,6 +246,7 @@ def test_custom_fee_equality(): fee1.set_all_collectors_are_exempt(True) assert fee1 != fee2 + def test_custom_fee_get_fee_collector_account_id_protobuf(): fee = DummyCustomFee() assert fee._get_fee_collector_account_id_protobuf() is None @@ -238,6 +256,7 @@ def test_custom_fee_get_fee_collector_account_id_protobuf(): fee.set_fee_collector_account_id(mock_account) assert 
fee._get_fee_collector_account_id_protobuf() == "proto-account" + def test_custom_fee_validate_checksums(): fee = DummyCustomFee() # No account, should not call validate_checksum @@ -249,26 +268,29 @@ def test_custom_fee_validate_checksums(): fee._validate_checksums(client) mock_account.validate_checksum.assert_called_once_with(client) + def test_custom_fee_from_proto_unrecognized(): class FakeProto: def WhichOneof(self, name): return "unknown_fee" + with pytest.raises(ValueError): CustomFee._from_proto(FakeProto()) + def test_set_amount_in_tinybars_deprecation(): """Test that set_amount_in_tinybars shows deprecation warning.""" fee = CustomFixedFee() - + # Test that deprecation warning is raised with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") fee.set_amount_in_tinybars(100) - + assert len(w) == 1 assert issubclass(w[0].category, DeprecationWarning) assert "set_amount_in_tinybars() is deprecated" in str(w[0].message) - + # Verify the method still works correctly assert fee.amount == 100 assert fee.denominating_token_id is None diff --git a/tests/unit/entity_id_helper_test.py b/tests/unit/entity_id_helper_test.py index ba18f34e5..ac30d5457 100644 --- a/tests/unit/entity_id_helper_test.py +++ b/tests/unit/entity_id_helper_test.py @@ -1,8 +1,13 @@ +from unittest.mock import MagicMock, patch import pytest +import struct +import requests from hiero_sdk_python.utils.entity_id_helper import ( parse_from_string, generate_checksum, + perform_query_to_mirror_node, + to_solidity_address, validate_checksum, format_to_string, format_to_string_with_checksum @@ -117,3 +122,88 @@ def test_parse_and_format_without_checksum(): formatted = format_to_string(shard, realm, num) assert formatted == original + +def test_to_solidity_address_valid(): + shard, realm, num = 0, 0, 1001 + result = to_solidity_address(shard, realm, num) + + # Expect raw packed bytes + expected = struct.pack(">iqq", shard, realm, num).hex() + + assert result == expected + assert 
len(result) == 40 # exactly 20 bytes + assert result.islower() + +def test_to_solidity_address_zero_values(): + assert to_solidity_address(0, 0, 0) == ("00" * 20) + +def test_to_solidity_address_out_of_range(): + shard, realm, num = 2**31, 0, 0 + with pytest.raises(ValueError, match="shard out of 32-bit range"): + to_solidity_address(shard, realm, num) + +def test_perform_query_to_mirror_node_success(): + """Test successful mirror node response without requests_mock.""" + + mock_response = MagicMock() + mock_response.json.return_value = {"account": "0.0.777"} + mock_response.raise_for_status.return_value = None + + with patch("hiero_sdk_python.utils.entity_id_helper.requests.get", return_value=mock_response): + result = perform_query_to_mirror_node("http://mirror-node/accounts/123") + assert result == {"account": "0.0.777"} + +def test_perform_query_to_mirror_node_failure(): + """Test mirror node failure handling.""" + + with patch("hiero_sdk_python.utils.entity_id_helper.requests.get") as mock_get: + mock_get.side_effect = requests.RequestException("boom") + + with pytest.raises(RuntimeError, match="Unexpected error while querying mirror node:"): + perform_query_to_mirror_node("http://mirror-node/accounts/123") + + +def test_perform_query_to_mirror_node_http_error(): + """ + Test that perform_query_to_mirror_node raises a RuntimeError when requests.get returns an HTTPError. + """ + mock_response = MagicMock() + mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError("HTTP fail") + + with patch("hiero_sdk_python.utils.entity_id_helper.requests.get", return_value=mock_response): + with pytest.raises(RuntimeError, match="Mirror node request failed"): + perform_query_to_mirror_node("http://mirror-node/accounts/123") + + +def test_perform_query_to_mirror_node_connection_error(): + """ + Test that perform_query_to_mirror_node raises a RuntimeError when requests.get raises a ConnectionError. 
+ """ + with patch( + "hiero_sdk_python.utils.entity_id_helper.requests.get", + side_effect=requests.exceptions.ConnectionError("Connection fail") + ): + with pytest.raises(RuntimeError, match="Mirror node request failed"): + perform_query_to_mirror_node("http://mirror-node/accounts/123") + + +def test_perform_query_to_mirror_node_timeout(): + """ + Test that perform_query_to_mirror_node raises a RuntimeError when requests.get raises a Timeout exception. + """ + with patch( + "hiero_sdk_python.utils.entity_id_helper.requests.get", + side_effect=requests.exceptions.Timeout("Timeout") + ): + with pytest.raises(RuntimeError, match="Mirror node request timed out"): + perform_query_to_mirror_node("http://mirror-node/accounts/123") + +def test_perform_query_to_mirror_node_invalid_url_none(): + """Test url must be a non-empty string (None case).""" + with pytest.raises(ValueError, match="url must be a non-empty string"): + perform_query_to_mirror_node(None) + +def test_perform_query_to_mirror_node_invalid_url_empty(): + """Test url must be a non-empty string (empty string case).""" + with pytest.raises(ValueError, match="url must be a non-empty string"): + perform_query_to_mirror_node("") diff --git a/tests/unit/ethereum_transaction_test.py b/tests/unit/ethereum_transaction_test.py index aeb1c5d20..db90dfd9e 100644 --- a/tests/unit/ethereum_transaction_test.py +++ b/tests/unit/ethereum_transaction_test.py @@ -206,6 +206,7 @@ def test_to_proto(mock_client, ethereum_params): assert proto.signedTransactionBytes assert len(proto.signedTransactionBytes) > 0 + def test_build_scheduled_body_raises_exception(): """Test that build_scheduled_body raises ValueError.""" schedule_tx = EthereumTransaction() @@ -213,6 +214,7 @@ def test_build_scheduled_body_raises_exception(): with pytest.raises(ValueError, match="Cannot schedule an EthereumTransaction"): schedule_tx.build_scheduled_body() + def test_ethereum_transaction_can_execute(): """Test that an ethereum transaction can be 
executed successfully.""" ok_response = transaction_response_pb2.TransactionResponse() diff --git a/tests/unit/evm_address_test.py b/tests/unit/evm_address_test.py index 5f22ec805..82cba0cf5 100644 --- a/tests/unit/evm_address_test.py +++ b/tests/unit/evm_address_test.py @@ -3,6 +3,7 @@ pytestmark = pytest.mark.unit + def test_from_string_without_prefix(): """Test creating EvmAddress from valid 40-character hex string.""" hex_str = "1234567890abcdef1234567890abcdef12345678" @@ -12,6 +13,7 @@ def test_from_string_without_prefix(): assert addr.to_string() == hex_str assert len(addr.address_bytes) == 20 + def test_from_string_with_0x_prefix(): """Test creating EvmAddress from valid hex string with '0x' prefix.""" hex_str = "0x1234567890abcdef1234567890abcdef12345678" @@ -21,16 +23,19 @@ def test_from_string_with_0x_prefix(): assert addr.to_string() == hex_str[2:] assert len(addr.address_bytes) == 20 + def test_from_string_invalid_length(): """Test ValueError for invalid hex string length.""" with pytest.raises(ValueError): EvmAddress.from_string("0x1234") + def test_from_string_invalid_hex_characters(): """Test ValueError for invalid hex characters.""" with pytest.raises(ValueError): EvmAddress.from_string("0xZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ") + def test_from_bytes_valid(): """Test creating EvmAddress from 20 bytes.""" raw = bytes(range(20)) @@ -40,11 +45,13 @@ def test_from_bytes_valid(): assert addr.address_bytes == raw assert addr.to_string() == raw.hex() + def test_from_bytes_invalid_length(): """Test ValueError for byte length not equal to 20.""" with pytest.raises(ValueError): EvmAddress.from_bytes(bytes(range(10))) + def test_equality(): """Test equality and hash behavior.""" raw = bytes(range(20)) diff --git a/tests/unit/executable_test.py b/tests/unit/executable_test.py index d0be5d424..ad9cd936a 100644 --- a/tests/unit/executable_test.py +++ b/tests/unit/executable_test.py @@ -1,30 +1,45 @@ import pytest import grpc from unittest.mock import patch 
+from itertools import chain, repeat from hiero_sdk_python.account.account_create_transaction import AccountCreateTransaction from hiero_sdk_python.account.account_id import AccountId from hiero_sdk_python.crypto.private_key import PrivateKey from hiero_sdk_python.exceptions import MaxAttemptsError, PrecheckError +from hiero_sdk_python.executable import ( + _is_transaction_receipt_or_record_request, +) from hiero_sdk_python.hapi.services import ( basic_types_pb2, crypto_get_account_balance_pb2, + query_pb2, response_header_pb2, response_pb2, transaction_get_receipt_pb2, transaction_receipt_pb2, ) -from hiero_sdk_python.hapi.services.transaction_response_pb2 import TransactionResponse as TransactionResponseProto +from hiero_sdk_python.hapi.services.transaction_response_pb2 import ( + TransactionResponse as TransactionResponseProto, +) from hiero_sdk_python.consensus.topic_create_transaction import TopicCreateTransaction from hiero_sdk_python.query.account_balance_query import CryptoGetAccountBalanceQuery +from hiero_sdk_python.query.transaction_get_receipt_query import ( + TransactionGetReceiptQuery, +) +from hiero_sdk_python.query.transaction_record_query import TransactionRecordQuery from hiero_sdk_python.response_code import ResponseCode +from hiero_sdk_python.transaction.transaction_id import TransactionId from tests.unit.mock_server import RealRpcError, mock_hedera_servers pytestmark = pytest.mark.unit + def test_retry_success_before_max_attempts(): """Test that execution succeeds on the last attempt before max_attempts.""" - busy_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.BUSY) + busy_response = TransactionResponseProto( + nodeTransactionPrecheckCode=ResponseCode.BUSY + ) ok_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) receipt_response = response_pb2.Response( @@ -35,18 +50,19 @@ def test_retry_success_before_max_attempts(): receipt=transaction_receipt_pb2.TransactionReceipt( 
status=ResponseCode.SUCCESS, accountID=basic_types_pb2.AccountID( - shardNum=0, - realmNum=0, - accountNum=1234 - ) - ) + shardNum=0, realmNum=0, accountNum=1234 + ), + ), ) ) # First server gives 2 BUSY responses then OK on the 3rd try response_sequences = [[busy_response, busy_response, ok_response, receipt_response]] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep'): + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + ): # Configure client to allow 3 attempts - should succeed on the last try client.max_attempts = 3 @@ -59,18 +75,25 @@ def test_retry_success_before_max_attempts(): try: receipt = transaction.execute(client) except (Exception, grpc.RpcError) as e: - pytest.fail(f"Transaction execution should not raise an exception, but raised: {e}") + pytest.fail( + f"Transaction execution should not raise an exception, but raised: {e}" + ) assert receipt.status == ResponseCode.SUCCESS def test_retry_failure_after_max_attempts(): """Test that execution fails after max_attempts with retriable errors.""" - busy_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.BUSY) + busy_response = TransactionResponseProto( + nodeTransactionPrecheckCode=ResponseCode.BUSY + ) response_sequences = [[busy_response, busy_response]] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep'): + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + ): client.max_attempts = 2 transaction = ( @@ -101,7 +124,7 @@ def test_node_switching_after_single_grpc_error(): ), receipt=transaction_receipt_pb2.TransactionReceipt( status=ResponseCode.SUCCESS - ) + ), ) ) @@ -112,7 +135,10 @@ def test_node_switching_after_single_grpc_error(): [error], ] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep'): 
+ with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + ): transaction = ( AccountCreateTransaction() .set_key_without_alias(PrivateKey.generate().public_key()) @@ -122,9 +148,13 @@ def test_node_switching_after_single_grpc_error(): try: transaction.execute(client) except (Exception, grpc.RpcError) as e: - pytest.fail(f"Transaction execution should not raise an exception, but raised: {e}") + pytest.fail( + f"Transaction execution should not raise an exception, but raised: {e}" + ) # Verify we're now on the second node - assert client.network.current_node._account_id == AccountId(0, 0, 4), "Client should have switched to the second node" + assert transaction.node_account_ids[ + transaction._node_account_ids_index + ] == AccountId(0, 0, 4), "Client should have switched to the second node" def test_node_switching_after_multiple_grpc_errors(): @@ -139,7 +169,7 @@ def test_node_switching_after_multiple_grpc_errors(): ), receipt=transaction_receipt_pb2.TransactionReceipt( status=ResponseCode.SUCCESS - ) + ), ) ) @@ -149,7 +179,10 @@ def test_node_switching_after_multiple_grpc_errors(): [ok_response, receipt_response], ] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep'): + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + ): transaction = ( AccountCreateTransaction() .set_key_without_alias(PrivateKey.generate().public_key()) @@ -159,16 +192,22 @@ def test_node_switching_after_multiple_grpc_errors(): try: receipt = transaction.execute(client) except (Exception, grpc.RpcError) as e: - pytest.fail(f"Transaction execution should not raise an exception, but raised: {e}") + pytest.fail( + f"Transaction execution should not raise an exception, but raised: {e}" + ) # Verify we're now on the third node - assert client.network.current_node._account_id == AccountId(0, 0, 5), "Client should have switched to 
the third node" + assert transaction.node_account_ids[ + transaction._node_account_ids_index + ] == AccountId(0, 0, 5), "Client should have switched to the third node" assert receipt.status == ResponseCode.SUCCESS def test_transaction_with_expired_error_not_retried(): """Test that an expired error is not retried.""" - error_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.TRANSACTION_EXPIRED) + error_response = TransactionResponseProto( + nodeTransactionPrecheckCode=ResponseCode.TRANSACTION_EXPIRED + ) ok_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) receipt_response = response_pb2.Response( @@ -178,14 +217,15 @@ def test_transaction_with_expired_error_not_retried(): ), receipt=transaction_receipt_pb2.TransactionReceipt( status=ResponseCode.SUCCESS - ) + ), ) ) - response_sequences = [ - [error_response] - ] + response_sequences = [[error_response]] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep'): + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + ): transaction = ( AccountCreateTransaction() .set_key_without_alias(PrivateKey.generate().public_key()) @@ -197,9 +237,12 @@ def test_transaction_with_expired_error_not_retried(): assert str(error_response.nodeTransactionPrecheckCode) in str(exc_info.value) + def test_transaction_with_fatal_error_not_retried(): """Test that a fatal error is not retried.""" - error_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.INVALID_TRANSACTION_BODY) + error_response = TransactionResponseProto( + nodeTransactionPrecheckCode=ResponseCode.INVALID_TRANSACTION_BODY + ) ok_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) receipt_response = response_pb2.Response( @@ -209,14 +252,15 @@ def test_transaction_with_fatal_error_not_retried(): ), receipt=transaction_receipt_pb2.TransactionReceipt( 
status=ResponseCode.SUCCESS - ) + ), ) ) - response_sequences = [ - [error_response] - ] + response_sequences = [[error_response]] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep'): + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + ): transaction = ( AccountCreateTransaction() .set_key_without_alias(PrivateKey.generate().public_key()) @@ -228,9 +272,12 @@ def test_transaction_with_fatal_error_not_retried(): assert str(error_response.nodeTransactionPrecheckCode) in str(exc_info.value) + def test_exponential_backoff_retry(): """Test that the retry mechanism uses exponential backoff.""" - busy_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.BUSY) + busy_response = TransactionResponseProto( + nodeTransactionPrecheckCode=ResponseCode.BUSY + ) ok_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) receipt_response = response_pb2.Response( @@ -240,15 +287,20 @@ def test_exponential_backoff_retry(): ), receipt=transaction_receipt_pb2.TransactionReceipt( status=ResponseCode.SUCCESS - ) + ), ) ) # Create several BUSY responses to force multiple retries - response_sequences = [[busy_response, busy_response, busy_response, ok_response, receipt_response]] + response_sequences = [ + [busy_response, busy_response, busy_response, ok_response, receipt_response] + ] # Use a mock for time.sleep to capture the delay values - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep') as mock_sleep: + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep") as mock_sleep, + ): client.max_attempts = 5 transaction = ( @@ -260,21 +312,30 @@ def test_exponential_backoff_retry(): try: transaction.execute(client) except (Exception, grpc.RpcError) as e: - pytest.fail(f"Transaction execution should not raise an exception, 
but raised: {e}") + pytest.fail( + f"Transaction execution should not raise an exception, but raised: {e}" + ) # Check that time.sleep was called the expected number of times (3 retries) - assert mock_sleep.call_count == 3, f"Expected 3 sleep calls, got {mock_sleep.call_count}" + assert ( + mock_sleep.call_count == 3 + ), f"Expected 3 sleep calls, got {mock_sleep.call_count}" # Verify exponential backoff by checking sleep durations are increasing sleep_args = [call_args[0][0] for call_args in mock_sleep.call_args_list] # Verify each subsequent delay is double than the previous for i in range(1, len(sleep_args)): - assert abs(sleep_args[i] - sleep_args[i-1] * 2) < 0.1, f"Expected doubling delays, but got {sleep_args}" + assert ( + abs(sleep_args[i] - sleep_args[i - 1] * 2) < 0.1 + ), f"Expected doubling delays, but got {sleep_args}" + def test_retriable_error_does_not_switch_node(): """Test that a retriable error does not switch nodes.""" - busy_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.BUSY) + busy_response = TransactionResponseProto( + nodeTransactionPrecheckCode=ResponseCode.BUSY + ) ok_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) receipt_response = response_pb2.Response( @@ -284,11 +345,14 @@ def test_retriable_error_does_not_switch_node(): ), receipt=transaction_receipt_pb2.TransactionReceipt( status=ResponseCode.SUCCESS - ) + ), ) ) response_sequences = [[busy_response, ok_response, receipt_response]] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep'): + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + ): transaction = ( AccountCreateTransaction() .set_key_without_alias(PrivateKey.generate().public_key()) @@ -298,9 +362,14 @@ def test_retriable_error_does_not_switch_node(): try: transaction.execute(client) except (Exception, grpc.RpcError) as e: - pytest.fail(f"Transaction 
execution should not raise an exception, but raised: {e}") + pytest.fail( + f"Transaction execution should not raise an exception, but raised: {e}" + ) + + assert client.network.current_node._account_id == AccountId( + 0, 0, 3 + ), "Client should not switch node on retriable errors" - assert client.network.current_node._account_id == AccountId(0, 0, 3), "Client should not switch node on retriable errors" def test_topic_create_transaction_retry_on_busy(): """Test that TopicCreateTransaction retries on BUSY response.""" @@ -309,9 +378,7 @@ def test_topic_create_transaction_retry_on_busy(): nodeTransactionPrecheckCode=ResponseCode.BUSY ) - ok_response = TransactionResponseProto( - nodeTransactionPrecheckCode=ResponseCode.OK - ) + ok_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) receipt_response = response_pb2.Response( transactionGetReceipt=transaction_get_receipt_pb2.TransactionGetReceiptResponse( @@ -320,12 +387,8 @@ def test_topic_create_transaction_retry_on_busy(): ), receipt=transaction_receipt_pb2.TransactionReceipt( status=ResponseCode.SUCCESS, - topicID=basic_types_pb2.TopicID( - shardNum=0, - realmNum=0, - topicNum=456 - ) - ) + topicID=basic_types_pb2.TopicID(shardNum=0, realmNum=0, topicNum=456), + ), ) ) @@ -333,7 +396,10 @@ def test_topic_create_transaction_retry_on_busy(): [busy_response, ok_response, receipt_response], ] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep') as mock_sleep: + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep") as mock_sleep, + ): client.max_attempts = 3 tx = ( @@ -354,7 +420,10 @@ def test_topic_create_transaction_retry_on_busy(): assert mock_sleep.call_count == 1, "Should have retried once" # Verify we didn't switch nodes (BUSY is retriable without node switch) - assert client.network.current_node._account_id == AccountId(0, 0, 3), "Should not have switched nodes on BUSY" + 
assert client.network.current_node._account_id == AccountId( + 0, 0, 3 + ), "Should not have switched nodes on BUSY" + def test_topic_create_transaction_fails_on_nonretriable_error(): """Test that TopicCreateTransaction fails on non-retriable error.""" @@ -367,7 +436,10 @@ def test_topic_create_transaction_fails_on_nonretriable_error(): [error_response], ] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep'): + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + ): tx = ( TopicCreateTransaction() .set_memo("Test with error") @@ -379,6 +451,7 @@ def test_topic_create_transaction_fails_on_nonretriable_error(): ): tx.execute(client) + def test_transaction_node_switching_body_bytes(): """Test that execution switches nodes after receiving a non-retriable error.""" ok_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) @@ -391,7 +464,7 @@ def test_transaction_node_switching_body_bytes(): ), receipt=transaction_receipt_pb2.TransactionReceipt( status=ResponseCode.SUCCESS - ) + ), ) ) # First node gives error, second node gives OK, third node gives error @@ -400,7 +473,10 @@ def test_transaction_node_switching_body_bytes(): [ok_response, receipt_response], ] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep'): + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + ): # We set the current node to 0 client.network._node_index = 0 client.network.current_node = client.network.nodes[0] @@ -414,50 +490,62 @@ def test_transaction_node_switching_body_bytes(): ) for node in client.network.nodes: - assert transaction._transaction_body_bytes.get(node._account_id) is not None, "Transaction body bytes should be set for all nodes" - sig_map = transaction._signature_map.get(transaction._transaction_body_bytes[node._account_id]) + assert 
( + transaction._transaction_body_bytes.get(node._account_id) is not None + ), "Transaction body bytes should be set for all nodes" + sig_map = transaction._signature_map.get( + transaction._transaction_body_bytes[node._account_id] + ) assert sig_map is not None, "Signature map should be set for all nodes" assert len(sig_map.sigPair) == 1, "Signature map should have one signature" - assert sig_map.sigPair[0].pubKeyPrefix == client.operator_private_key.public_key().to_bytes_raw(), "Signature should be for the operator" + assert ( + sig_map.sigPair[0].pubKeyPrefix + == client.operator_private_key.public_key().to_bytes_raw() + ), "Signature should be for the operator" try: transaction.execute(client) except (Exception, grpc.RpcError) as e: - pytest.fail(f"Transaction execution should not raise an exception, but raised: {e}") + pytest.fail( + f"Transaction execution should not raise an exception, but raised: {e}" + ) # Verify we're now on the second node - assert client.network.current_node._account_id == AccountId(0, 0, 4), "Client should have switched to the second node" + assert transaction.node_account_ids[ + transaction._node_account_ids_index + ] == AccountId(0, 0, 4), "Client should have switched to the second node" + def test_query_retry_on_busy(): """ Test query retry behavior when receiving BUSY response. - + This test simulates two scenarios: 1. First node returns BUSY response 2. Second node returns OK response with the balance - + Verifies that the query successfully retries on a different node after receiving BUSY, that the balance is returned correctly and that time.sleep was called once for the retry delay. 
""" # Create a BUSY response to simulate a node being temporarily unavailable # This response indicates the node cannot process the request at this time busy_response = response_pb2.Response( - cryptogetAccountBalance=crypto_get_account_balance_pb2.CryptoGetAccountBalanceResponse( - header=response_header_pb2.ResponseHeader( - nodeTransactionPrecheckCode=ResponseCode.BUSY - ) + cryptogetAccountBalance=crypto_get_account_balance_pb2.CryptoGetAccountBalanceResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.BUSY ) ) + ) # Create a successful OK response with a balance of 1 Hbar # This simulates a successful account balance query response ok_response = response_pb2.Response( - cryptogetAccountBalance=crypto_get_account_balance_pb2.CryptoGetAccountBalanceResponse( - header=response_header_pb2.ResponseHeader( - nodeTransactionPrecheckCode=ResponseCode.OK - ), - balance=100000000 # Balance in tinybars - ) + cryptogetAccountBalance=crypto_get_account_balance_pb2.CryptoGetAccountBalanceResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK + ), + balance=100000000, # Balance in tinybars ) + ) # Set up response sequences for multiple nodes: # First node returns BUSY, forcing a retry @@ -467,7 +555,10 @@ def test_query_retry_on_busy(): [ok_response], ] - with mock_hedera_servers(response_sequences) as client, patch('hiero_sdk_python.executable.time.sleep') as mock_sleep: + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep") as mock_sleep, + ): # We set the current node to the first node so we are sure it will return BUSY response client.network._node_index = 0 client.network.current_node = client.network.nodes[0] @@ -482,4 +573,758 @@ def test_query_retry_on_busy(): assert balance.hbars.to_tinybars() == 100000000 # Verify we switched to the second node - assert client.network.current_node._account_id == AccountId(0, 0, 4), 
"Client should have switched to the second node" + assert query._node_account_ids_index == 1 + assert query.node_account_ids[query._node_account_ids_index] == AccountId( + 0, 0, 4 + ), "Client should have switched to the second node" + + +# Set max_attempts +def test_set_max_attempts_with_valid_param(): + """Test that set_max_attempts for the transaction and query.""" + # Transaction + transaction = AccountCreateTransaction() + + assert transaction._max_attempts == None + transaction.set_max_attempts(10) + assert transaction._max_attempts == 10 + + # Query + query = CryptoGetAccountBalanceQuery() + + assert query._max_attempts == None + query.set_max_attempts(10) + assert query._max_attempts == 10 + + +@pytest.mark.parametrize("invalid_max_attempts", ["1", 0.2, True, False, object(), {}]) +def test_set_max_attempts_with_invalid_type(invalid_max_attempts): + """Test that set_max_attempts raises TypeError for non-int values.""" + with pytest.raises( + TypeError, + match=f"max_attempts must be of type int, got {type(invalid_max_attempts).__name__}", + ): + transaction = AccountCreateTransaction() + transaction.set_max_attempts(invalid_max_attempts) + + with pytest.raises( + TypeError, + match=f"max_attempts must be of type int, got {type(invalid_max_attempts).__name__}", + ): + query = CryptoGetAccountBalanceQuery() + query.set_max_attempts(invalid_max_attempts) + + +@pytest.mark.parametrize("invalid_max_attempts", [0, -10]) +def test_set_max_attempts_with_invalid_value(invalid_max_attempts): + """Test that set_max_attempts raises ValueError for non-positive values.""" + with pytest.raises(ValueError, match="max_attempts must be greater than 0"): + transaction = AccountCreateTransaction() + transaction.set_max_attempts(invalid_max_attempts) + + with pytest.raises(ValueError, match="max_attempts must be greater than 0"): + query = CryptoGetAccountBalanceQuery() + query.set_max_attempts(invalid_max_attempts) + + +# Set grpc_deadline +def 
test_set_grpc_deadline_with_valid_param(): + """Test that set_grpc_deadline updates default value of _grpc_deadline.""" + # Transaction + transaction = AccountCreateTransaction() + assert transaction._grpc_deadline == None + + returned = transaction.set_grpc_deadline(20) + assert transaction._grpc_deadline == 20 + assert returned is transaction + + # Query + query = CryptoGetAccountBalanceQuery() + assert query._grpc_deadline == None + + returned = query.set_grpc_deadline(20) + assert query._grpc_deadline == 20 + assert returned is query + + +@pytest.mark.parametrize("invalid_grpc_deadline", ["1", True, False, object(), {}]) +def test_set_grpc_deadline_with_invalid_type(invalid_grpc_deadline): + """Test that set_grpc_deadline raises TypeError for invalid types.""" + with pytest.raises( + TypeError, + match=f"grpc_deadline must be of type Union\\[int, float\\], got {type(invalid_grpc_deadline).__name__}", + ): + # Transaction + transaction = AccountCreateTransaction() + transaction.set_grpc_deadline(invalid_grpc_deadline) + + with pytest.raises( + TypeError, + match=f"grpc_deadline must be of type Union\\[int, float\\], got {type(invalid_grpc_deadline).__name__}", + ): + query = CryptoGetAccountBalanceQuery() + query.set_grpc_deadline(invalid_grpc_deadline) + + +@pytest.mark.parametrize( + "invalid_grpc_deadline", [0, -10, 0.0, -2.3, float("inf"), float("nan")] +) +def test_set_grpc_deadline_with_invalid_value(invalid_grpc_deadline): + """Test that set_grpc_deadline raises ValueError for non-positive values.""" + with pytest.raises( + ValueError, match="grpc_deadline must be a finite value greater than 0" + ): + # Transaction + transaction = AccountCreateTransaction() + transaction.set_grpc_deadline(invalid_grpc_deadline) + + with pytest.raises( + ValueError, match="grpc_deadline must be a finite value greater than 0" + ): + # Query + query = CryptoGetAccountBalanceQuery() + query.set_grpc_deadline(invalid_grpc_deadline) + + +def 
test_warning_when_request_timeout_less_than_grpc_deadline(): + """Warn when request_timeout is less than grpc_deadline.""" + tx = AccountCreateTransaction() + tx.set_grpc_deadline(10) + + with pytest.warns(UserWarning): + tx.set_request_timeout(5) + + +# Set request_timeout +def test_set_request_timeout_with_valid_param(): + """Test that set_request_timeout updates default value of _request_timeout.""" + # Transaction + transaction = AccountCreateTransaction() + assert transaction._request_timeout == None + + returned = transaction.set_request_timeout(200) + assert transaction._request_timeout == 200 + assert returned is transaction + + # Query + query = CryptoGetAccountBalanceQuery() + assert query._request_timeout == None + + returned = query.set_request_timeout(200) + assert query._request_timeout == 200 + assert returned is query + + +@pytest.mark.parametrize("invalid_request_timeout", ["1", True, False, object(), {}]) +def test_set_request_timeout_with_invalid_type(invalid_request_timeout): + """Test that set_request_timeout raises TypeError for invalid types.""" + with pytest.raises( + TypeError, + match=f"request_timeout must be of type Union\\[int, float\\], got {type(invalid_request_timeout).__name__}", + ): + # Transaction + transaction = AccountCreateTransaction() + transaction.set_request_timeout(invalid_request_timeout) + + with pytest.raises( + TypeError, + match=f"request_timeout must be of type Union\\[int, float\\], got {type(invalid_request_timeout).__name__}", + ): + # Query + query = CryptoGetAccountBalanceQuery() + query.set_request_timeout(invalid_request_timeout) + + +@pytest.mark.parametrize( + "invalid_request_timeout", [0, -10, 0.0, -2.3, float("inf"), float("nan")] +) +def test_set_request_timeout_with_invalid_value(invalid_request_timeout): + """Test that set_request_timeout raises ValueError for non-positive values.""" + with pytest.raises( + ValueError, match="request_timeout must be a finite value greater than 0" + ): + transaction = 
AccountCreateTransaction() + transaction.set_request_timeout(invalid_request_timeout) + + with pytest.raises( + ValueError, match="request_timeout must be a finite value greater than 0" + ): + query = CryptoGetAccountBalanceQuery() + query.set_request_timeout(invalid_request_timeout) + + +def test_warning_when_grpc_deadline_exceeds_request_timeout(): + """Warn when grpc_deadline is greater than request_timeout.""" + tx = AccountCreateTransaction() + + tx.set_request_timeout(5) + + with pytest.warns(UserWarning): + tx.set_grpc_deadline(10) + + +# Test is transaction_recepit_or_record +def test_is_transaction_receipt_or_record_request(): + """Detect receipt and record query requests correctly.""" + receipt_query = query_pb2.Query( + transactionGetReceipt=transaction_get_receipt_pb2.TransactionGetReceiptQuery() + ) + + assert _is_transaction_receipt_or_record_request(receipt_query) is True + assert _is_transaction_receipt_or_record_request(object()) is False + + +# Set min_backoff +def test_set_min_backoff_with_valid_param(): + """Test that set_min_backoff updates default value of _min_backoff.""" + # Transaction + transaction = AccountCreateTransaction() + assert transaction._min_backoff == None + + returned = transaction.set_min_backoff(2) + assert transaction._min_backoff == 2 + assert returned is transaction + + # Query + query = CryptoGetAccountBalanceQuery() + assert query._min_backoff == None + + returned = query.set_min_backoff(2) + assert query._min_backoff == 2 + assert returned is query + + +@pytest.mark.parametrize("invalid_min_backoff", ["1", True, False, object(), {}]) +def test_set_min_backoff_with_invalid_type(invalid_min_backoff): + """Test that set_min_backoff raises TypeError for invalid types.""" + with pytest.raises( + TypeError, + match=f"min_backoff must be of type int or float, got {type(invalid_min_backoff).__name__}", + ): + # Transaction + transaction = AccountCreateTransaction() + transaction.set_min_backoff(invalid_min_backoff) + + with 
pytest.raises( + TypeError, + match=f"min_backoff must be of type int or float, got {type(invalid_min_backoff).__name__}", + ): + query = CryptoGetAccountBalanceQuery() + query.set_min_backoff(invalid_min_backoff) + + +@pytest.mark.parametrize( + "invalid_min_backoff", [-1, -10, float("inf"), float("-inf"), float("nan")] +) +def test_set_min_backoff_with_invalid_value(invalid_min_backoff): + """Test that set_min_backoff raises ValueError for invalid values.""" + with pytest.raises(ValueError, match="min_backoff must be a finite value >= 0"): + transaction = AccountCreateTransaction() + transaction.set_min_backoff(invalid_min_backoff) + + with pytest.raises(ValueError, match="min_backoff must be a finite value >= 0"): + query = CryptoGetAccountBalanceQuery() + query.set_min_backoff(invalid_min_backoff) + + +def test_set_min_backoff_exceeds_max_backoff(): + """Test that set_min_backoff raises ValueError if it exceeds max_backoff.""" + with pytest.raises(ValueError, match="min_backoff cannot exceed max_backoff"): + transaction = AccountCreateTransaction() + transaction.set_max_backoff(5) + + transaction.set_min_backoff(10) + + with pytest.raises(ValueError, match="min_backoff cannot exceed max_backoff"): + query = CryptoGetAccountBalanceQuery() + query.set_max_backoff(5) + + query.set_min_backoff(10) + + +# Set max_backoff +def test_set_max_backoff_with_valid_param(): + """Test that set_max_backoff updates default value of _max_backoff.""" + # Transaction + transaction = AccountCreateTransaction() + assert transaction._max_backoff == None + + returned = transaction.set_max_backoff(2) + assert transaction._max_backoff == 2 + assert returned is transaction + + # Query + query = CryptoGetAccountBalanceQuery() + assert query._max_backoff == None + + returned = query.set_max_backoff(2) + assert query._max_backoff == 2 + assert returned is query + + +@pytest.mark.parametrize("invalid_max_backoff", ["1", True, False, object(), {}]) +def 
test_set_max_backoff_with_invalid_type(invalid_max_backoff): + """Test that set_max_backoff raises TypeError for invalid types.""" + with pytest.raises( + TypeError, + match=f"max_backoff must be of type int or float, got {type(invalid_max_backoff).__name__}", + ): + transaction = AccountCreateTransaction() + transaction.set_max_backoff(invalid_max_backoff) + + with pytest.raises( + TypeError, + match=f"max_backoff must be of type int or float, got {type(invalid_max_backoff).__name__}", + ): + query = CryptoGetAccountBalanceQuery() + query.set_max_backoff(invalid_max_backoff) + + +@pytest.mark.parametrize( + "invalid_max_backoff", [-1, -10, float("inf"), float("-inf"), float("nan")] +) +def test_set_max_backoff_with_invalid_value(invalid_max_backoff): + """Test that set_max_backoff raises ValueError for invalid values.""" + with pytest.raises(ValueError, match="max_backoff must be a finite value >= 0"): + transaction = AccountCreateTransaction() + transaction.set_max_backoff(invalid_max_backoff) + + with pytest.raises(ValueError, match="max_backoff must be a finite value >= 0"): + query = CryptoGetAccountBalanceQuery() + query.set_max_backoff(invalid_max_backoff) + + +def test_set_max_backoff_less_than_min_backoff(): + """Test that set_max_backoff raises ValueError if it is less than min_backoff.""" + with pytest.raises(ValueError, match="max_backoff cannot be less than min_backoff"): + transaction = AccountCreateTransaction() + transaction.set_min_backoff(5) + + transaction.set_max_backoff(2) + + with pytest.raises(ValueError, match="max_backoff cannot be less than min_backoff"): + query = CryptoGetAccountBalanceQuery() + query.set_min_backoff(5) + + query.set_max_backoff(2) + + +def test_backoff_is_capped_by_max_backoff(): + """Backoff delay must not exceed max_backoff.""" + tx = AccountCreateTransaction() + tx.set_min_backoff(2) + tx.set_max_backoff(5) + + # attempt=0 min * 2 = 4 + assert tx._calculate_backoff(0) == 4 + # attempt=1 min * 4 = 8 : capped to 5 + 
assert tx._calculate_backoff(1) == 5 + + +# Resolve config +def test_execution_config_inherits_from_client(mock_client): + """Test that resolve_execution_config inherits config from client if not set.""" + mock_client.max_attempts = 7 + mock_client._min_backoff = 1 + mock_client._max_backoff = 8 + mock_client._grpc_deadline = 9 + mock_client._request_timeout = 20 + + tx = AccountCreateTransaction() + + tx._resolve_execution_config(mock_client, None) + + assert tx._max_attempts == 7 + assert tx._min_backoff == 1 + assert tx._max_backoff == 8 + assert tx._grpc_deadline == 9 + assert tx._request_timeout == 20 + + +def test_executable_overrides_client_config(mock_client): + """Test the set value override the set config property.""" + mock_client.max_attempts = 10 + + tx = AccountCreateTransaction().set_max_attempts(3) + tx._resolve_execution_config(mock_client, None) + + assert tx._max_attempts == 3 + + +def test_no_healthy_nodes_raises(mock_client): + """Test that execution fails if no healthy nodes are available.""" + mock_client.network._healthy_nodes = [] + + tx = ( + AccountCreateTransaction() + .set_key_without_alias(PrivateKey.generate().public_key()) + .set_initial_balance(1) + ) + + with pytest.raises(RuntimeError, match="No healthy nodes available"): + tx.execute(mock_client) + + +def test_set_node_account_ids_overrides_client_nodes(mock_client): + """Explicit node_account_ids should override client network.""" + node = AccountId(0, 0, 999) + + tx = AccountCreateTransaction().set_node_account_id(node) + tx._resolve_execution_config(mock_client, None) + + assert tx.node_account_ids == [node] + + +def test_parameter_timeout_overrides_client_default(mock_client): + """Explicit timeout pass on the executable should override the client default timeout.""" + tx = AccountCreateTransaction() + tx._resolve_execution_config(mock_client, 2) + + assert tx._request_timeout == 2 + + +def test_set_timeout_overrides_parameter_timeout(mock_client): + """Explicit timeout set 
on the tx should override the pass timeout.""" + tx = AccountCreateTransaction() + tx.set_request_timeout(5) + tx._resolve_execution_config(mock_client, 2) + + assert tx._request_timeout == 5 + + +# Reuest timeout +def test_request_timeout_exceeded_stops_execution(): + """Test that execution stops when request_timeout is exceeded.""" + busy_response = TransactionResponseProto( + nodeTransactionPrecheckCode=ResponseCode.BUSY + ) + + response_sequences = [[busy_response]] + + def fake_time(): + yield 0 # start + yield 5 # attempt 1 + while True: + yield 11 # timeout exceeded + + time_iter = fake_time() + + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep"), + patch( + "hiero_sdk_python.executable.time.monotonic", side_effect=lambda: next(time_iter) + ), + patch("hiero_sdk_python.node._Node.is_healthy", return_value=True), + patch( + "hiero_sdk_python.executable._execute_method", + return_value=busy_response, + ), + ): + client._request_timeout = 10 + client.max_attempts = 5 + + tx = ( + AccountCreateTransaction() + .set_key_without_alias(PrivateKey.generate().public_key()) + .set_initial_balance(1) + ) + + with pytest.raises(MaxAttemptsError): + tx.execute(client) + + +@pytest.mark.parametrize( + "error", + [ + RealRpcError(grpc.StatusCode.DEADLINE_EXCEEDED, "timeout"), + RealRpcError(grpc.StatusCode.UNAVAILABLE, "unavailable"), + RealRpcError(grpc.StatusCode.RESOURCE_EXHAUSTED, "busy"), + RealRpcError( + grpc.StatusCode.INTERNAL, "received rst stream" + ), # internal with rst stream + Exception("non grpc exception"), # non grpc exception + ], +) +def test_should_exponential_returns_true(error): + """Test should exponential returns true for listed grpc error and non grpc error.""" + tx = AccountCreateTransaction() + assert tx._should_retry_exponentially(error) is True + + +@pytest.mark.parametrize( + "error", + [ + RealRpcError(grpc.StatusCode.INVALID_ARGUMENT, "invalid args"), + RealRpcError( + 
grpc.StatusCode.INTERNAL, "internal" + ), # internal with no rst stream + ], +) +def test_should_exponential_returns_false(error): + """Test should exponential returns false for non-listed grpc error.""" + tx = AccountCreateTransaction() + assert tx._should_retry_exponentially(error) is False + + +@pytest.mark.parametrize( + "error", + [ + RealRpcError(grpc.StatusCode.DEADLINE_EXCEEDED, "timeout"), + RealRpcError(grpc.StatusCode.UNAVAILABLE, "unavailable"), + RealRpcError(grpc.StatusCode.RESOURCE_EXHAUSTED, "busy"), + ], +) +def test_should_exponential_error_mark_node_unhealty_and_advance(error): + """Exponential gRPC retry errors advance the node without sleep-based backoff.""" + ok_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) + + receipt_response = response_pb2.Response( + transactionGetReceipt=transaction_get_receipt_pb2.TransactionGetReceiptResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK + ), + receipt=transaction_receipt_pb2.TransactionReceipt( + status=ResponseCode.SUCCESS + ), + ) + ) + + response_sequences = [ + [error], + [ok_response, receipt_response], + ] + + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep") as mock_sleep, + ): + tx = ( + AccountCreateTransaction() + .set_key_without_alias(PrivateKey.generate().public_key()) + .set_initial_balance(1) + ) + + receipt = tx.execute(client) + + assert receipt.status == ResponseCode.SUCCESS + # No delay_for_attempt backoff call, Node is mark unhealthy and advance + assert mock_sleep.call_count == 0 + # Node must have changed + assert tx._node_account_ids_index == 1 + + +def test_rst_stream_error_marks_node_unhealthy_and_advances_without_backoff(): + """INTERNAL RST_STREAM errors trigger exponential retry by advancing the node without sleep-based backoff.""" + error = RealRpcError(grpc.StatusCode.INTERNAL, "received rst stream") + + ok_response = 
TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) + + receipt_response = response_pb2.Response( + transactionGetReceipt=transaction_get_receipt_pb2.TransactionGetReceiptResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK + ), + receipt=transaction_receipt_pb2.TransactionReceipt( + status=ResponseCode.SUCCESS + ), + ) + ) + + response_sequences = [ + [error], + [ok_response, receipt_response], + ] + + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.executable.time.sleep") as mock_sleep, + ): + tx = ( + AccountCreateTransaction() + .set_key_without_alias(PrivateKey.generate().public_key()) + .set_initial_balance(1) + ) + + receipt = tx.execute(client) + + # Retry succeeds + assert receipt.status == ResponseCode.SUCCESS + # RST_STREAM exponential retry does not use delay-based backoff + assert mock_sleep.call_count == 0 + # Node must advance after marking the first node unhealthy + assert tx._node_account_ids_index == 1 + + +@pytest.mark.parametrize( + "error", + [ + RealRpcError(grpc.StatusCode.ALREADY_EXISTS, "already exists"), + RealRpcError(grpc.StatusCode.ABORTED, "aborted"), + RealRpcError(grpc.StatusCode.UNAUTHENTICATED, "unauthenticated"), + ], +) +def test_non_exponential_grpc_error_raises_exception(error): + """Errors that are not retried exponentially should raise error immediately""" + response_sequences = [[error]] + + with ( + mock_hedera_servers(response_sequences) as client, + pytest.raises(grpc.RpcError), + ): + tx = ( + AccountCreateTransaction() + .set_key_without_alias(PrivateKey.generate().public_key()) + .set_initial_balance(1) + ) + + tx.execute(client) + + +def test_execution_skips_unhealthy_nodes_and_advances(): + """Execution should skip unhealthy nodes and advance to the next healthy one.""" + busy_response = TransactionResponseProto( + nodeTransactionPrecheckCode=ResponseCode.BUSY + ) + ok_response = 
TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) + + receipt_response = response_pb2.Response( + transactionGetReceipt=transaction_get_receipt_pb2.TransactionGetReceiptResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK + ), + receipt=transaction_receipt_pb2.TransactionReceipt( + status=ResponseCode.SUCCESS + ), + ) + ) + + response_sequences = [ + [busy_response], # first node (unhealthy) + [ok_response, receipt_response], # second node (healthy) + ] + + with ( + mock_hedera_servers(response_sequences) as client, + patch( + "hiero_sdk_python.node._Node.is_healthy", + side_effect=chain([False, True], repeat(True)), + ), + ): + tx = ( + AccountCreateTransaction() + .set_key_without_alias(PrivateKey.generate().public_key()) + .set_initial_balance(1) + ) + + receipt = tx.execute(client) + + assert receipt.status == ResponseCode.SUCCESS + # Ensure the node index advanced past the unhealthy node + assert tx._node_account_ids_index == 1 + + +def test_execution_raises_if_all_nodes_unhealthy(mock_client): + """Execution should raise RuntimeError if all nodes are unhealthy.""" + tx = ( + AccountCreateTransaction() + .set_key_without_alias(PrivateKey.generate().public_key()) + .set_initial_balance(1) + ) + + # Patch node health to always return False + with patch("hiero_sdk_python.node._Node.is_healthy", side_effect=repeat(False)): + with pytest.raises(RuntimeError, match="All nodes are unhealthy"): + tx.execute(mock_client) + + +@pytest.mark.parametrize( + "tx", + [ + TransactionRecordQuery().set_transaction_id( + TransactionId.from_string("0.0.3@1769674705.770340600") + ), + TransactionGetReceiptQuery().set_transaction_id( + TransactionId.from_string("0.0.3@1769674705.770340600") + ), + ], +) +def test_unhealthy_node_receipt_request_triggers_delay_and_no_node_change( + tx, mock_client +): + """Unhealthy node with transaction receipt/record request calls _delay_for_attempt but does not advance node.""" + 
initial_index = tx._node_account_ids_index + + with ( + patch("hiero_sdk_python.node._Node.is_healthy", return_value=False), + patch("hiero_sdk_python.executable._delay_for_attempt") as mock_delay, + ): + + with pytest.raises(Exception): + tx.execute(mock_client) + + # _delay_for_attempt called + assert mock_delay.call_count > 0 + # Node index did NOT change + assert tx._node_account_ids_index == initial_index + + +def test_retry_invalid_node_account_updates_network(): + """ + Verify that a RETRY execution state with INVALID_NODE_ACCOUNT triggers + node backoff, network refresh, and retry delay before succeeding. + """ + error_response = TransactionResponseProto( + nodeTransactionPrecheckCode=ResponseCode.INVALID_NODE_ACCOUNT + ) + + ok_response = TransactionResponseProto(nodeTransactionPrecheckCode=ResponseCode.OK) + + receipt_response = response_pb2.Response( + transactionGetReceipt=transaction_get_receipt_pb2.TransactionGetReceiptResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK + ), + receipt=transaction_receipt_pb2.TransactionReceipt( + status=ResponseCode.SUCCESS + ), + ) + ) + + response_sequences = [ + [error_response], # first node → INVALID_NODE_ACCOUNT + [ok_response], # second node → success + ] + + with ( + mock_hedera_servers(response_sequences) as client, + patch("hiero_sdk_python.node._Node.is_healthy", return_value=True), + patch( + "hiero_sdk_python.client.network.Network._increase_backoff", + ) as mock_increase_backoff, + patch( + "hiero_sdk_python.client.client.Client.update_network", + ) as mock_update_network, + patch( + "hiero_sdk_python.executable._delay_for_attempt", + ) as mock_delay, + patch( + "hiero_sdk_python.transaction.transaction_response.TransactionResponse.get_receipt", + return_value=receipt_response, + ), + ): + tx = ( + AccountCreateTransaction() + .set_key_without_alias(PrivateKey.generate().public_key()) + .set_initial_balance(1) + ) + + tx.execute(client) + + # Node index 
must advance after INVALID_NODE_ACCOUNT + assert tx._node_account_ids_index == 1 + + # Recovery actions + mock_increase_backoff.assert_called_once() + mock_update_network.assert_called_once() + mock_delay.assert_called_once() diff --git a/tests/unit/file_append_transaction_test.py b/tests/unit/file_append_transaction_test.py index 0638a58e3..1c7a06ae1 100644 --- a/tests/unit/file_append_transaction_test.py +++ b/tests/unit/file_append_transaction_test.py @@ -13,12 +13,11 @@ from hiero_sdk_python.transaction.transaction_id import TransactionId - def test_constructor_with_parameters(): """Test creating a file append transaction with constructor parameters.""" file_id = FileId(0, 0, 12345) contents = b"Test append content" - + file_tx = FileAppendTransaction( file_id=file_id, contents=contents, @@ -32,18 +31,19 @@ def test_constructor_with_parameters(): assert file_tx.chunk_size == 2048 assert file_tx._default_transaction_fee == Hbar(5).to_tinybars() + def test_set_methods(): """Test the set methods of FileAppendTransaction.""" file_id = FileId(0, 0, 12345) contents = b"Test content" - + file_tx = FileAppendTransaction() test_cases = [ - ('set_file_id', file_id, 'file_id'), - ('set_contents', contents, 'contents'), - ('set_max_chunks', 15, 'max_chunks'), - ('set_chunk_size', 1024, 'chunk_size'), + ("set_file_id", file_id, "file_id"), + ("set_contents", contents, "contents"), + ("set_max_chunks", 15, "max_chunks"), + ("set_chunk_size", 1024, "chunk_size"), ] for method_name, value, attr_name in test_cases: @@ -51,21 +51,22 @@ def test_set_methods(): assert tx_after_set is file_tx assert getattr(file_tx, attr_name) == value + def test_get_required_chunks(): """Test calculating required chunks for different content sizes.""" # Empty content file_tx = FileAppendTransaction() assert file_tx.get_required_chunks() == 1 - + # Small content (fits in one chunk) file_tx.set_contents(b"Small content") assert file_tx.get_required_chunks() == 1 - + # Large content (requires 
multiple chunks) large_content = b"Large content " * 100 # ~1400 bytes file_tx.set_contents(large_content) assert file_tx.get_required_chunks() == 1 # Default chunk size is 4096 - + # Set smaller chunk size to test multiple chunks file_tx.set_chunk_size(100) assert file_tx.get_required_chunks() > 1 @@ -75,46 +76,45 @@ def test_freeze_with_generates_transaction_ids(): """Test that freeze_with generates transaction IDs for all chunks.""" content = b"Large content that needs multiple chunks" file_tx = FileAppendTransaction( - file_id=FileId(0, 0, 12345), - contents=content, - chunk_size=10 + file_id=FileId(0, 0, 12345), contents=content, chunk_size=10 ) - + # Mock client and transaction_id mock_client = MagicMock() mock_transaction_id = TransactionId( - account_id=MagicMock(), - valid_start=Timestamp(0, 1) + account_id=MagicMock(), valid_start=Timestamp(0, 1) ) file_tx.transaction_id = mock_transaction_id - + file_tx.freeze_with(mock_client) - + # Should have generated transaction IDs for all chunks expected_chunks = file_tx.get_required_chunks() assert len(file_tx._transaction_ids) == expected_chunks - + # First transaction ID should be the original assert file_tx._transaction_ids[0] == mock_transaction_id - + # Subsequent transaction IDs should have incremented timestamps for i in range(1, len(file_tx._transaction_ids)): expected_nanos = mock_transaction_id.valid_start.nanos + i assert file_tx._transaction_ids[i].valid_start.nanos == expected_nanos + def test_validate_chunking(): """Test chunking validation.""" large_content = b"Large content " * 1000 # ~14000 bytes file_tx = FileAppendTransaction( - contents=large_content, - chunk_size=100, - max_chunks=5 + contents=large_content, chunk_size=100, max_chunks=5 ) - + # Should raise error when required chunks > max_chunks - with pytest.raises(ValueError, match="Cannot execute FileAppendTransaction with more than 5 chunks"): + with pytest.raises( + ValueError, match="Cannot execute FileAppendTransaction with more than 
5 chunks" + ): file_tx._validate_chunking() + def test_multi_chunk_execution(): """Test that multi-chunk execution works correctly.""" # Create content that requires multiple chunks @@ -122,24 +122,25 @@ def test_multi_chunk_execution(): file_tx = FileAppendTransaction( file_id=FileId(0, 0, 12345), contents=content, - chunk_size=6 # 6 bytes per chunk = 3 chunks + chunk_size=6, # 6 bytes per chunk = 3 chunks ) - + # Mock client and responses mock_client = MagicMock() mock_receipt = MagicMock() mock_receipt.status = ResponseCode.SUCCESS - + # Mock the execute method to return our mock receipt - with patch.object(Transaction, 'execute', return_value=mock_receipt): + with patch.object(Transaction, "execute", return_value=mock_receipt): receipt = file_tx.execute(mock_client) - + # Should return the first receipt assert receipt == mock_receipt - + # Should have called execute 3 times (once per chunk) assert Transaction.execute.call_count == 1 + def test_build_transaction_body_missing_file_id(): """Test build_transaction_body raises error when file ID is missing.""" file_tx = FileAppendTransaction() @@ -147,16 +148,13 @@ def test_build_transaction_body_missing_file_id(): with pytest.raises(ValueError, match="Missing required FileID"): file_tx.build_transaction_body() + def test_build_scheduled_body(): """Test building a schedulable file append transaction body.""" file_id = FileId(0, 0, 12345) contents = b"Test schedulable content" - file_tx = FileAppendTransaction( - file_id=file_id, - contents=contents, - chunk_size=100 - ) + file_tx = FileAppendTransaction(file_id=file_id, contents=contents, chunk_size=100) # Build the scheduled body schedulable_body = file_tx.build_scheduled_body() diff --git a/tests/unit/file_create_transaction_test.py b/tests/unit/file_create_transaction_test.py index 31c01b751..395d5800f 100644 --- a/tests/unit/file_create_transaction_test.py +++ b/tests/unit/file_create_transaction_test.py @@ -8,9 +8,16 @@ from hiero_sdk_python.response_code 
import ResponseCode from hiero_sdk_python.timestamp import Timestamp from hiero_sdk_python.hapi.services import basic_types_pb2, response_pb2 -from hiero_sdk_python.hapi.services.transaction_response_pb2 import TransactionResponse as TransactionResponseProto -from hiero_sdk_python.hapi.services.transaction_receipt_pb2 import TransactionReceipt as TransactionReceiptProto -from hiero_sdk_python.hapi.services import transaction_get_receipt_pb2, response_header_pb2 +from hiero_sdk_python.hapi.services.transaction_response_pb2 import ( + TransactionResponse as TransactionResponseProto, +) +from hiero_sdk_python.hapi.services.transaction_receipt_pb2 import ( + TransactionReceipt as TransactionReceiptProto, +) +from hiero_sdk_python.hapi.services import ( + transaction_get_receipt_pb2, + response_header_pb2, +) from hiero_sdk_python.hapi.services import file_create_pb2 from hiero_sdk_python.hapi.services.schedulable_transaction_body_pb2 import ( SchedulableTransactionBody, @@ -20,6 +27,7 @@ pytestmark = pytest.mark.unit + def test_constructor_with_parameters(): """Test creating a file create transaction with constructor parameters.""" private_key = PrivateKey.generate() @@ -29,9 +37,7 @@ def test_constructor_with_parameters(): file_memo = "Test memo" file_tx = FileCreateTransaction( - keys=key_list, - contents=contents, - file_memo=file_memo + keys=key_list, contents=contents, file_memo=file_memo ) assert file_tx.keys == key_list @@ -40,25 +46,30 @@ def test_constructor_with_parameters(): assert file_tx.expiration_time is not None # Should have default expiration assert file_tx._default_transaction_fee == Hbar(5).to_tinybars() + def test_constructor_default_expiration_time(): """Test that constructor sets expiration time to exactly time.time() + DEFAULT_EXPIRY_SECONDS.""" fixed_time = 1640995200 # Fixed timestamp: Jan 1, 2022 - - with patch('time.time', return_value=fixed_time): + + with patch("time.time", return_value=fixed_time): file_tx = FileCreateTransaction() - - 
expected_expiration = Timestamp(fixed_time + FileCreateTransaction.DEFAULT_EXPIRY_SECONDS, 0) + + expected_expiration = Timestamp( + fixed_time + FileCreateTransaction.DEFAULT_EXPIRY_SECONDS, 0 + ) assert file_tx.expiration_time == expected_expiration + def test_constructor_with_custom_expiration_time(): """Test that constructor uses provided expiration time instead of default.""" custom_expiration = Timestamp(1704067200, 0) # Jan 1, 2024 - - with patch('time.time', return_value=1640995200): + + with patch("time.time", return_value=1640995200): file_tx = FileCreateTransaction(expiration_time=custom_expiration) - + assert file_tx.expiration_time == custom_expiration + def test_build_transaction_body(mock_account_ids): """Test building a file create transaction body with valid values.""" operator_id, _, node_account_id, _, _ = mock_account_ids @@ -66,17 +77,15 @@ def test_build_transaction_body(mock_account_ids): private_key = PrivateKey.generate() public_key = private_key.public_key() key_list = [public_key] - + file_tx = FileCreateTransaction( - keys=key_list, - contents=b"Test content", - file_memo="Test memo" + keys=key_list, contents=b"Test content", file_memo="Test memo" ) # Set operator and node account IDs needed for building transaction body file_tx.operator_account_id = operator_id file_tx.node_account_id = node_account_id - + transaction_body = file_tx.build_transaction_body() expected_keys = basic_types_pb2.KeyList(keys=[key._to_proto() for key in key_list]) @@ -84,6 +93,7 @@ def test_build_transaction_body(mock_account_ids): assert transaction_body.fileCreate.contents == b"Test content" assert transaction_body.fileCreate.memo == "Test memo" + def test_build_scheduled_body(mock_account_ids): """Test building a schedulable file create transaction body with valid values.""" operator_id, _, node_account_id, _, _ = mock_account_ids @@ -95,7 +105,7 @@ def test_build_scheduled_body(mock_account_ids): file_tx = FileCreateTransaction( keys=key_list, 
contents=b"Test schedulable content", - file_memo="Test schedulable memo" + file_memo="Test schedulable memo", ) # Set operator and node account IDs needed for building transaction body @@ -113,6 +123,7 @@ def test_build_scheduled_body(mock_account_ids): assert schedulable_body.fileCreate.contents == b"Test schedulable content" assert schedulable_body.fileCreate.memo == "Test schedulable memo" + def test_set_methods(): """Test the set methods of FileCreateTransaction.""" private_key = PrivateKey.generate() @@ -125,10 +136,10 @@ def test_set_methods(): file_tx = FileCreateTransaction() test_cases = [ - ('set_keys', key_list, 'keys'), - ('set_contents', contents, 'contents'), - ('set_file_memo', file_memo, 'file_memo'), - ('set_expiration_time', expiration_time, 'expiration_time') + ("set_keys", key_list, "keys"), + ("set_contents", contents, "contents"), + ("set_file_memo", file_memo, "file_memo"), + ("set_expiration_time", expiration_time, "expiration_time"), ] for method_name, value, attr_name in test_cases: @@ -136,6 +147,7 @@ def test_set_methods(): assert tx_after_set is file_tx assert getattr(file_tx, attr_name) == value + def test_set_keys_variations(): """Test setting keys with different input types.""" file_tx = FileCreateTransaction() @@ -160,28 +172,29 @@ def test_set_keys_variations(): file_tx.set_keys(key_list) assert file_tx.keys is key_list + def test_set_methods_require_not_frozen(mock_client): """Test that set methods raise exception when transaction is frozen.""" private_key = PrivateKey.generate() public_key = private_key.public_key() - - file_tx = FileCreateTransaction( - keys=[public_key], - contents=b"test content" - ) + + file_tx = FileCreateTransaction(keys=[public_key], contents=b"test content") file_tx.freeze_with(mock_client) test_cases = [ - ('set_keys', [public_key]), - ('set_contents', b"new content"), - ('set_file_memo', "new memo"), - ('set_expiration_time', Timestamp(1704067200, 0)) + ("set_keys", [public_key]), + ("set_contents", 
b"new content"), + ("set_file_memo", "new memo"), + ("set_expiration_time", Timestamp(1704067200, 0)), ] for method_name, value in test_cases: - with pytest.raises(Exception, match="Transaction is immutable; it has been frozen"): + with pytest.raises( + Exception, match="Transaction is immutable; it has been frozen" + ): getattr(file_tx, method_name)(value) + def test_file_create_transaction_can_execute(): """Test that a file create transaction can be executed successfully.""" # Create test transaction responses @@ -191,11 +204,7 @@ def test_file_create_transaction_can_execute(): # Create a mock receipt for successful file creation mock_receipt_proto = TransactionReceiptProto( status=ResponseCode.SUCCESS, - fileID=basic_types_pb2.FileID( - shardNum=0, - realmNum=0, - fileNum=5678 - ) + fileID=basic_types_pb2.FileID(shardNum=0, realmNum=0, fileNum=5678), ) # Create a response for the receipt query @@ -204,7 +213,7 @@ def test_file_create_transaction_can_execute(): header=response_header_pb2.ResponseHeader( nodeTransactionPrecheckCode=ResponseCode.OK ), - receipt=mock_receipt_proto + receipt=mock_receipt_proto, ) ) @@ -214,7 +223,7 @@ def test_file_create_transaction_can_execute(): with mock_hedera_servers(response_sequences) as client: test_key = PrivateKey.generate().public_key() - + transaction = ( FileCreateTransaction() .set_keys(test_key) @@ -224,9 +233,12 @@ def test_file_create_transaction_can_execute(): receipt = transaction.execute(client) - assert receipt.status == ResponseCode.SUCCESS, "Transaction should have succeeded" + assert ( + receipt.status == ResponseCode.SUCCESS + ), "Transaction should have succeeded" assert receipt.file_id.file == 5678 + def test_file_create_transaction_from_proto(): """Test that a file create transaction can be created from a protobuf object.""" private_key = PrivateKey.generate() @@ -237,22 +249,24 @@ def test_file_create_transaction_from_proto(): proto = file_create_pb2.FileCreateTransactionBody( 
keys=basic_types_pb2.KeyList(keys=[key._to_proto() for key in key_list]), contents=b"Proto test content", - memo="Proto test memo" + memo="Proto test memo", ) - + # Deserialize the protobuf object from_proto = FileCreateTransaction()._from_proto(proto) - + # Verify deserialized transaction matches original data assert from_proto.contents == b"Proto test content" assert from_proto.file_memo == "Proto test memo" assert len(from_proto.keys) == 1 assert isinstance(from_proto.keys[0], PublicKey) - + # Deserialize empty protobuf - from_proto = FileCreateTransaction()._from_proto(file_create_pb2.FileCreateTransactionBody()) - + from_proto = FileCreateTransaction()._from_proto( + file_create_pb2.FileCreateTransactionBody() + ) + # Verify empty protobuf deserializes to empty/default values assert from_proto.contents == b"" assert from_proto.file_memo == "" - assert from_proto.keys == [] \ No newline at end of file + assert from_proto.keys == [] diff --git a/tests/unit/file_delete_transaction_test.py b/tests/unit/file_delete_transaction_test.py index ce7702901..fb6bdb663 100644 --- a/tests/unit/file_delete_transaction_test.py +++ b/tests/unit/file_delete_transaction_test.py @@ -116,6 +116,7 @@ def test_build_scheduled_body(mock_account_ids, file_id): # Verify fields in the schedulable body assert schedulable_body.fileDelete.fileID == file_id._to_proto() + def test_get_method(): """Test retrieving the gRPC method for the transaction.""" delete_tx = FileDeleteTransaction() diff --git a/tests/unit/file_id_test.py b/tests/unit/file_id_test.py index d8c1120eb..0e962e625 100644 --- a/tests/unit/file_id_test.py +++ b/tests/unit/file_id_test.py @@ -4,198 +4,226 @@ pytestmark = pytest.mark.unit + @pytest.fixture def client(mock_client): - mock_client.network.ledger_id = bytes.fromhex("00") # mainnet ledger id + mock_client.network.ledger_id = bytes.fromhex("00") # mainnet ledger id return mock_client + def test_default_initialization(): """Test FileId initialization with default 
values.""" file_id = FileId() - + assert file_id.shard == 0 assert file_id.realm == 0 assert file_id.file == 0 assert file_id.checksum == None + def test_custom_initialization(): """Test FileId initialization with custom values.""" file_id = FileId(shard=1, realm=2, file=3) - + assert file_id.shard == 1 assert file_id.realm == 2 assert file_id.file == 3 assert file_id.checksum == None + def test_str_representation(): """Test string representation of FileId.""" file_id = FileId(shard=1, realm=2, file=3) - + assert str(file_id) == "1.2.3" + def test_str_representation_default(): """Test string representation of FileId with default values.""" file_id = FileId() - + assert str(file_id) == "0.0.0" + +def test_repr_representation(): + """Test repr representation of FileId.""" + file_id = FileId(0, 0, 150) + + assert repr(file_id) == "FileId(shard=0, realm=0, file=150)" + + +def test_repr_representation_with_checksum(): + """Test repr representation of FileId with checksum.""" + file_id = FileId.from_string("0.0.1-dfkxr") + + assert repr(file_id) == "FileId(shard=0, realm=0, file=1)" + + def test_from_string_valid(): """Test creating FileId from valid string format.""" file_id = FileId.from_string("1.2.3") - + assert file_id.shard == 1 assert file_id.realm == 2 assert file_id.file == 3 assert file_id.checksum == None + def test_from_string_with_spaces(): """Test creating FileId from string with leading/trailing spaces.""" file_id = FileId.from_string("1.2.3") - + assert file_id.shard == 1 assert file_id.realm == 2 assert file_id.file == 3 assert file_id.checksum == None + def test_from_string_zeros(): """Test creating FileId from string with zero values.""" file_id = FileId.from_string("0.0.0") - + assert file_id.shard == 0 assert file_id.realm == 0 assert file_id.file == 0 assert file_id.checksum == None + def test_from_string_large_numbers(): """Test creating FileId from string with large numbers.""" file_id = FileId.from_string("999.888.777") - + assert file_id.shard 
== 999 assert file_id.realm == 888 assert file_id.file == 777 assert file_id.checksum == None + def test_from_string_with_checksum(): """Test creating FileId from string with leading/trailing spaces.""" file_id = FileId.from_string("1.2.3-abcde") - + assert file_id.shard == 1 assert file_id.realm == 2 assert file_id.file == 3 assert file_id.checksum == "abcde" + @pytest.mark.parametrize( - 'invalid_id', + "invalid_id", [ - '1.2', # Too few parts - '1.2.3.4', # Too many parts - 'a.b.c', # Non-numeric parts - '', # Empty string - '1.a.3', # Partial numeric + "1.2", # Too few parts + "1.2.3.4", # Too many parts + "a.b.c", # Non-numeric parts + "", # Empty string + "1.a.3", # Partial numeric 123, None, - '0.0.-1', - 'abc.def.ghi', - '0.0.1-ad', - '0.0.1-addefgh', - '0.0.1 - abcde', - ' 0.0.100 ' - ] + "0.0.-1", + "abc.def.ghi", + "0.0.1-ad", + "0.0.1-addefgh", + "0.0.1 - abcde", + " 0.0.100 ", + ], ) def test_from_string_for_invalid_format(invalid_id): """Should raise error when creating FileId from invalid string input.""" with pytest.raises( - ValueError, match=f"Invalid file ID string '{invalid_id}'. Expected format 'shard.realm.file'." + ValueError, + match=f"Invalid file ID string '{invalid_id}'. 
Expected format 'shard.realm.file'.", ): FileId.from_string(invalid_id) + def test_to_proto(): """Test converting FileId to protobuf format.""" file_id = FileId(shard=1, realm=2, file=3) proto = file_id._to_proto() - + assert isinstance(proto, basic_types_pb2.FileID) assert proto.shardNum == 1 assert proto.realmNum == 2 assert proto.fileNum == 3 + def test_to_proto_default_values(): """Test converting FileId with default values to protobuf format.""" file_id = FileId() proto = file_id._to_proto() - + assert isinstance(proto, basic_types_pb2.FileID) assert proto.shardNum == 0 assert proto.realmNum == 0 assert proto.fileNum == 0 + def test_from_proto(): """Test creating FileId from protobuf format.""" - proto = basic_types_pb2.FileID( - shardNum=1, - realmNum=2, - fileNum=3 - ) - + proto = basic_types_pb2.FileID(shardNum=1, realmNum=2, fileNum=3) + file_id = FileId._from_proto(proto) - + assert file_id.shard == 1 assert file_id.realm == 2 assert file_id.file == 3 + def test_from_proto_zero_values(): """Test creating FileId from protobuf format with zero values.""" - proto = basic_types_pb2.FileID( - shardNum=0, - realmNum=0, - fileNum=0 - ) - + proto = basic_types_pb2.FileID(shardNum=0, realmNum=0, fileNum=0) + file_id = FileId._from_proto(proto) - + assert file_id.shard == 0 assert file_id.realm == 0 assert file_id.file == 0 + def test_roundtrip_proto_conversion(): """Test that converting to proto and back preserves values.""" original = FileId(shard=5, realm=10, file=15) proto = original._to_proto() reconstructed = FileId._from_proto(proto) - + assert original.shard == reconstructed.shard assert original.realm == reconstructed.realm assert original.file == reconstructed.file + def test_roundtrip_string_conversion(): """Test that converting to string and back preserves values.""" original = FileId(shard=7, realm=14, file=21) string_repr = str(original) reconstructed = FileId.from_string(string_repr) - + assert original.shard == reconstructed.shard assert 
original.realm == reconstructed.realm assert original.file == reconstructed.file + def test_equality(): """Test FileId equality comparison.""" file_id1 = FileId(shard=1, realm=2, file=3) file_id2 = FileId(shard=1, realm=2, file=3) file_id3 = FileId(shard=1, realm=2, file=4) - + assert file_id1 == file_id2 - assert file_id1 != file_id3 + assert file_id1 != file_id3 + def test_str_representaion_with_checksum(client): """Should return string with checksum when ledger id is provided.""" file_id = FileId.from_string("0.0.1") assert file_id.to_string_with_checksum(client) == "0.0.1-dfkxr" + def test_validate_checksum_success(client): """Should pass checksum validation when checksum is correct.""" file_id = FileId.from_string("0.0.1-dfkxr") file_id.validate_checksum(client) + def test_validate_checksum_failure(client): """Should raise ValueError if checksum validation fails.""" file_id = FileId.from_string("0.0.1-wronx") diff --git a/tests/unit/file_info_query_test.py b/tests/unit/file_info_query_test.py index c7da82840..e8602c34a 100644 --- a/tests/unit/file_info_query_test.py +++ b/tests/unit/file_info_query_test.py @@ -8,7 +8,7 @@ from hiero_sdk_python.hapi.services import ( response_pb2, response_header_pb2, - file_get_info_pb2 + file_get_info_pb2, ) from hiero_sdk_python.hapi.services.basic_types_pb2 import KeyList as KeyListProto from hiero_sdk_python.hapi.services.timestamp_pb2 import Timestamp as TimestampProto @@ -18,41 +18,47 @@ pytestmark = pytest.mark.unit + def test_constructor(): """Test initialization of FileInfoQuery.""" file_id = FileId(0, 0, 2) - + query = FileInfoQuery() assert query.file_id is None - + query = FileInfoQuery(file_id) assert query.file_id == file_id + def test_execute_fails_with_missing_file_id(mock_client): """Test request creation with missing File ID.""" query = FileInfoQuery() - - with pytest.raises(ValueError, match="File ID must be set before making the request."): + + with pytest.raises( + ValueError, match="File ID must be set 
before making the request." + ): query.execute(mock_client) + def test_get_method(): """Test retrieving the gRPC method for the query.""" query = FileInfoQuery() - + mock_channel = Mock() mock_file_stub = Mock() mock_channel.file = mock_file_stub - + method = query._get_method(mock_channel) - + assert method.transaction is None assert method.query == mock_file_stub.getFileInfo + def test_file_info_query_execute(private_key): """Test basic functionality of FileInfoQuery with mock server.""" file_id = FileId(0, 0, 2) expiration_time = TimestampProto(seconds=1718745600) - + # Create file info response with test data file_info_response = file_get_info_pb2.FileGetInfoResponse.FileInfo( fileID=file_id._to_proto(), @@ -60,21 +66,21 @@ def test_file_info_query_execute(private_key): expirationTime=expiration_time, deleted=False, keys=KeyListProto(keys=[private_key.public_key()._to_proto()]), - memo="test memo" + memo="test memo", ) response_sequences = get_file_info_responses(file_info_response) - + with mock_hedera_servers(response_sequences) as client: query = FileInfoQuery(file_id) - + # Get cost and verify it matches expected value cost = query.get_cost(client) assert cost.to_tinybars() == 2 - + # Execute query and get result result = query.execute(client) - + assert result.file_id == file_id assert result.size == 1000 assert result.expiration_time == Timestamp._from_protobuf(expiration_time) @@ -82,34 +88,37 @@ def test_file_info_query_execute(private_key): assert result.keys[0].to_bytes_raw() == private_key.public_key().to_bytes_raw() assert result.file_memo == "test memo" + def get_file_info_responses(file_info_response): - return [[ - response_pb2.Response( - fileGetInfo=file_get_info_pb2.FileGetInfoResponse( - header=response_header_pb2.ResponseHeader( - nodeTransactionPrecheckCode=ResponseCode.OK, - responseType=ResponseType.COST_ANSWER, - cost=2 + return [ + [ + response_pb2.Response( + fileGetInfo=file_get_info_pb2.FileGetInfoResponse( + 
header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK, + responseType=ResponseType.COST_ANSWER, + cost=2, + ) + ) + ), + response_pb2.Response( + fileGetInfo=file_get_info_pb2.FileGetInfoResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK, + responseType=ResponseType.COST_ANSWER, + cost=2, + ) ) - ) - ), - response_pb2.Response( - fileGetInfo=file_get_info_pb2.FileGetInfoResponse( - header=response_header_pb2.ResponseHeader( - nodeTransactionPrecheckCode=ResponseCode.OK, - responseType=ResponseType.COST_ANSWER, - cost=2 + ), + response_pb2.Response( + fileGetInfo=file_get_info_pb2.FileGetInfoResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=ResponseCode.OK, + responseType=ResponseType.ANSWER_ONLY, + cost=2, + ), + fileInfo=file_info_response, ) - ) - ), - response_pb2.Response( - fileGetInfo=file_get_info_pb2.FileGetInfoResponse( - header=response_header_pb2.ResponseHeader( - nodeTransactionPrecheckCode=ResponseCode.OK, - responseType=ResponseType.ANSWER_ONLY, - cost=2 - ), - fileInfo=file_info_response - ) - ) - ]] + ), + ] + ] diff --git a/tests/unit/file_update_transaction_test.py b/tests/unit/file_update_transaction_test.py index 8f00e6d56..e0cb78952 100644 --- a/tests/unit/file_update_transaction_test.py +++ b/tests/unit/file_update_transaction_test.py @@ -200,6 +200,7 @@ def test_build_transaction_body_with_optional_fields(mock_account_ids, file_id): # When file_memo is None, the memo field should not be set in the protobuf assert not transaction_body.fileUpdate.HasField("memo") + def test_build_scheduled_body(mock_account_ids, file_id): """Test building a schedulable file update transaction body.""" operator_id, _, node_account_id, _, _ = mock_account_ids @@ -359,4 +360,4 @@ def test_encode_contents_string(): # Test None handling encoded = file_tx._encode_contents(None) - assert encoded is None \ No newline at end of file + assert encoded is 
None diff --git a/tests/unit/get_receipt_query_test.py b/tests/unit/get_receipt_query_test.py index a0c27be74..79b4d3d35 100644 --- a/tests/unit/get_receipt_query_test.py +++ b/tests/unit/get_receipt_query_test.py @@ -12,13 +12,16 @@ transaction_get_receipt_pb2, transaction_receipt_pb2, ) -from hiero_sdk_python.query.transaction_get_receipt_query import TransactionGetReceiptQuery +from hiero_sdk_python.query.transaction_get_receipt_query import ( + TransactionGetReceiptQuery, +) from hiero_sdk_python.response_code import ResponseCode from tests.unit.mock_server import mock_hedera_servers pytestmark = pytest.mark.unit + # This test uses fixture transaction_id as parameter def test_transaction_get_receipt_query(transaction_id): """Test basic functionality of TransactionGetReceiptQuery with a mocked client.""" @@ -219,9 +222,7 @@ def test_transaction_get_receipt_query_children_empty_when_not_requested( response_sequences = [[response]] with mock_hedera_servers(response_sequences) as client: - query = ( - TransactionGetReceiptQuery().set_transaction_id(transaction_id) - ) + query = TransactionGetReceiptQuery().set_transaction_id(transaction_id) result = query.execute(client) @@ -229,12 +230,18 @@ def test_transaction_get_receipt_query_children_empty_when_not_requested( assert result.children == [] -def test_transaction_get_receipt_query_include_children_with_no_children(transaction_id): - """ Testing that nothing explode if no children ar passed""" +def test_transaction_get_receipt_query_include_children_with_no_children( + transaction_id, +): + """Testing that nothing explode if no children ar passed""" response = response_pb2.Response( transactionGetReceipt=transaction_get_receipt_pb2.TransactionGetReceiptResponse( - header=response_header_pb2.ResponseHeader(nodeTransactionPrecheckCode=ResponseCode.OK), - receipt=transaction_receipt_pb2.TransactionReceipt(status=ResponseCode.SUCCESS), + header=response_header_pb2.ResponseHeader( + 
nodeTransactionPrecheckCode=ResponseCode.OK + ), + receipt=transaction_receipt_pb2.TransactionReceipt( + status=ResponseCode.SUCCESS + ), # no child_transaction_receipts ) ) @@ -293,7 +300,10 @@ def test_transaction_get_receipt_query_returns_duplicate_receipts_when_requested assert result.status == ResponseCode.SUCCESS assert len(result.duplicates) == 2 for idx, duplicate in enumerate(result.duplicates): - assert duplicate._to_proto() == response.transactionGetReceipt.duplicateTransactionReceipts[idx] + assert ( + duplicate._to_proto() + == response.transactionGetReceipt.duplicateTransactionReceipts[idx] + ) def test_transaction_get_receipt_query_returns_empty_duplicate_receipts_when_requested( @@ -358,10 +368,7 @@ def test_transaction_get_receipt_query_returns_empty_duplicate_receipts_when_not response_sequences = [[response]] with mock_hedera_servers(response_sequences) as client: - query = ( - TransactionGetReceiptQuery() - .set_transaction_id(transaction_id) - ) + query = TransactionGetReceiptQuery().set_transaction_id(transaction_id) result = query.execute(client) diff --git a/tests/unit/hbar_allowance_test.py b/tests/unit/hbar_allowance_test.py index 9a19617c3..756d3f28a 100644 --- a/tests/unit/hbar_allowance_test.py +++ b/tests/unit/hbar_allowance_test.py @@ -287,7 +287,9 @@ def test_from_proto_field_helper(): # Test with empty field (should not happen in this proto, but testing the method) proto_empty = CryptoAllowanceProto(amount=1000) - result = HbarAllowance._from_proto_field(proto_empty, "owner", AccountId._from_proto) + result = HbarAllowance._from_proto_field( + proto_empty, "owner", AccountId._from_proto + ) assert result is None diff --git a/tests/unit/hbar_test.py b/tests/unit/hbar_test.py index 2589aae2d..67553893d 100644 --- a/tests/unit/hbar_test.py +++ b/tests/unit/hbar_test.py @@ -7,6 +7,7 @@ pytestmark = pytest.mark.unit + def test_constructor(): """Test creation with int, float, and Decimal values in hbars.""" hbar1 = Hbar(50) @@ -21,30 
+22,30 @@ def test_constructor(): assert hbar3.to_tinybars() == 50_000_000 assert hbar3.to_hbars() == 0.5 -@pytest.mark.parametrize( - 'invalid_amount', - ['1', True, False, {}, object] -) + +@pytest.mark.parametrize("invalid_amount", ["1", True, False, {}, object]) def test_constructor_invalid_amount_type(invalid_amount): """Test creation with invalid type raise errors.""" - with pytest.raises(TypeError, match="Amount must be of type int, float, or Decimal"): + with pytest.raises( + TypeError, match="Amount must be of type int, float, or Decimal" + ): Hbar(invalid_amount) -@pytest.mark.parametrize( - 'invalid_amount', - [float('inf'), float('nan')] -) + +@pytest.mark.parametrize("invalid_amount", [float("inf"), float("nan")]) def test_constructor_non_finite_amount_value(invalid_amount): """Test creation raise errors for non finite amount.""" with pytest.raises(ValueError, match="Hbar amount must be finite"): Hbar(invalid_amount) + def test_constructor_with_tinybar_unit(): """Test creation with unit set to HbarUnit.TINYBAR.""" hbar1 = Hbar(50, unit=HbarUnit.TINYBAR) assert hbar1.to_tinybars() == 50 assert hbar1.to_hbars() == 0.0000005 + def test_constructor_with_unit(): """Test creation directly in tinybars.""" hbar1 = Hbar(50, unit=HbarUnit.TINYBAR) @@ -59,7 +60,7 @@ def test_constructor_with_unit(): assert hbar3.to_tinybars() == 5_000_000 assert hbar3.to_hbars() == 0.05 - hbar4 = Hbar(50, unit=HbarUnit.HBAR) # Default + hbar4 = Hbar(50, unit=HbarUnit.HBAR) # Default assert hbar4.to_tinybars() == 5_000_000_000 assert hbar4.to_hbars() == 50 @@ -75,15 +76,20 @@ def test_constructor_with_unit(): assert hbar7.to_tinybars() == 5_000_000_000_000_000_000 assert hbar7.to_hbars() == 50_000_000_000 + def test_constructor_fractional_tinybar(): """Test creation with fractional tinybars.""" with pytest.raises(ValueError, match="Fractional tinybar value not allowed"): Hbar(0.1, unit=HbarUnit.TINYBAR) + def test_constructor_invalid_type(): """Test creation of Hbar with invalid 
type.""" - with pytest.raises(TypeError, match="Amount must be of type int, float, or Decimal"): - Hbar('10') + with pytest.raises( + TypeError, match="Amount must be of type int, float, or Decimal" + ): + Hbar("10") + def test_from_string(): """Test creation of HBAR from valid string""" @@ -95,26 +101,29 @@ def test_from_string(): assert Hbar.from_string("+3").to_tinybars() == 300_000_000 assert Hbar.from_string("-3").to_tinybars() == -300_000_000 + @pytest.mark.parametrize( - 'invalid_str', + "invalid_str", [ - '1 ', - '-1 ', - '+1 ', - '1.151 ', - '-1.151 ', - '+1.151 ', - '1.', - '1.151.', - '.1', - '1.151 uℏ', - '1.151 h', - 'abcd' - ] + "1 ", + "-1 ", + "+1 ", + "1.151 ", + "-1.151 ", + "+1.151 ", + "1.", + "1.151.", + ".1", + "1.151 uℏ", + "1.151 h", + "abcd", + ], ) def test_from_string_invalid(invalid_str): """Test creation of HBAR from invalid string""" - with pytest.raises(ValueError, match=re.escape(f"Invalid Hbar format: '{invalid_str}'")): + with pytest.raises( + ValueError, match=re.escape(f"Invalid Hbar format: '{invalid_str}'") + ): Hbar.from_string(invalid_str) @@ -128,6 +137,7 @@ def test_creation_using_of_method(): assert Hbar.of(50, HbarUnit.MEGABAR).to_tinybars() == 5_000_000_000_000_000 assert Hbar.of(50, HbarUnit.GIGABAR).to_tinybars() == 5_000_000_000_000_000_000 + def test_to_unit(): assert Hbar(50).to(HbarUnit.HBAR) == 50 assert Hbar(50).to(HbarUnit.TINYBAR) == 5_000_000_000 @@ -137,6 +147,7 @@ def test_to_unit(): assert Hbar(50).to(HbarUnit.MEGABAR) == 0.00005 assert Hbar(50).to(HbarUnit.GIGABAR) == 0.00000005 + def test_negated(): """Test negation of Hbar values.""" hbar = Hbar(10) @@ -146,11 +157,13 @@ def test_negated(): # Check that again become equal to original assert neg_hbar.negated() == hbar + def test_hbar_constant(): assert Hbar.ZERO.to_hbars() == 0 assert Hbar.MAX.to_hbars() == 50_000_000_000 assert Hbar.MIN.to_hbars() == -50_000_000_000 + def test_comparison(): """Test comparison and equality operators.""" h1 = Hbar(1) @@ 
-168,10 +181,11 @@ def test_comparison(): assert (h1 == 5) is False with pytest.raises(TypeError): _ = h1 < 5 - + + def test_factory_methods(): """Test the convenient from_X factory methods.""" - + # from_microbars # 1 microbar = 100 tinybars result = Hbar.from_microbars(1) @@ -224,6 +238,7 @@ def test_factory_methods(): # NEW TESTS: Coverage improvements for issue #1447 # --------------------------------------------------------------------------- + def test_from_tinybars_rejects_non_int(): """from_tinybars() should reject non-integer input.""" with pytest.raises(TypeError, match="tinybars must be an int"): @@ -236,7 +251,6 @@ def test_from_tinybars_rejects_non_int(): Hbar.from_tinybars(Decimal("1000")) - @pytest.mark.parametrize("other", [1, 1.0, "1", None]) def test_comparison_with_non_hbar_raises_type_error(other): """Ordering comparisons with non-Hbar types should raise TypeError.""" @@ -255,7 +269,6 @@ def test_comparison_with_non_hbar_raises_type_error(other): _ = h >= other - def test_str_formatting_and_negatives(): """String representation should use fixed 8 decimal places.""" assert str(Hbar(1)) == "1.00000000 ℏ" @@ -271,7 +284,6 @@ def test_repr_contains_class_name_and_value(): assert "2" in r - def test_hash_consistency_for_equal_values(): """Equal Hbar values must have identical hashes.""" h1 = Hbar(1) @@ -288,13 +300,11 @@ def test_hash_consistency_for_equal_values(): assert len(d) == 1 assert d[h1] == "value2" - @pytest.mark.parametrize( - 'invalid_tinybars', - ['1', 0.1, Decimal('0.1'), True, False, object, {}] + "invalid_tinybars", ["1", 0.1, Decimal("0.1"), True, False, object, {}] ) def test_from_tinybars_invalid_type_param(invalid_tinybars): """Test from_tinybar method raises error if the type is not int.""" with pytest.raises(TypeError, match=re.escape("tinybars must be an int.")): - Hbar.from_tinybars(invalid_tinybars) \ No newline at end of file + Hbar.from_tinybars(invalid_tinybars) diff --git a/tests/unit/hbar_transfer_test.py 
b/tests/unit/hbar_transfer_test.py index 2c6f7e4f2..33a68819d 100644 --- a/tests/unit/hbar_transfer_test.py +++ b/tests/unit/hbar_transfer_test.py @@ -28,7 +28,9 @@ def test_hbar_transfer_constructor(mock_account_ids): assert hbar_transfer.is_approved is False # Test with explicit is_approved=True - approved_transfer = HbarTransfer(account_id=account_id, amount=amount, is_approved=True) + approved_transfer = HbarTransfer( + account_id=account_id, amount=amount, is_approved=True + ) assert approved_transfer.account_id == account_id assert approved_transfer.amount == amount @@ -48,7 +50,9 @@ def test_to_proto(mock_account_ids): amount = 1000 is_approved = True - hbar_transfer = HbarTransfer(account_id=account_id, amount=amount, is_approved=is_approved) + hbar_transfer = HbarTransfer( + account_id=account_id, amount=amount, is_approved=is_approved + ) # Convert to protobuf proto = hbar_transfer._to_proto() diff --git a/tests/unit/hedera_trust_manager_test.py b/tests/unit/hedera_trust_manager_test.py index 449f33c3e..33ce72d51 100644 --- a/tests/unit/hedera_trust_manager_test.py +++ b/tests/unit/hedera_trust_manager_test.py @@ -1,4 +1,5 @@ """Unit tests for _HederaTrustManager certificate validation.""" + import hashlib import pytest from src.hiero_sdk_python.node import _HederaTrustManager @@ -11,7 +12,7 @@ def test_trust_manager_init_with_cert_hash(): cert_hash = b"abc123def456" trust_manager = _HederaTrustManager(cert_hash, verify_certificate=True) # UTF-8 decodable strings are decoded directly, not converted to hex - assert trust_manager.cert_hash == cert_hash.decode('utf-8').lower() + assert trust_manager.cert_hash == cert_hash.decode("utf-8").lower() def test_trust_manager_init_with_utf8_hex_string(): @@ -45,8 +46,10 @@ def test_trust_manager_check_server_trusted_matching_hash(): pem_cert = b"-----BEGIN CERTIFICATE-----\nTEST_CERT\n-----END CERTIFICATE-----\n" cert_hash_bytes = hashlib.sha384(pem_cert).digest() cert_hash_hex = cert_hash_bytes.hex().lower() - - 
trust_manager = _HederaTrustManager(cert_hash_hex.encode('utf-8'), verify_certificate=True) + + trust_manager = _HederaTrustManager( + cert_hash_hex.encode("utf-8"), verify_certificate=True + ) # Should not raise assert trust_manager.check_server_trusted(pem_cert) is True @@ -55,9 +58,9 @@ def test_trust_manager_check_server_trusted_mismatched_hash(): """Test certificate validation raises error on hash mismatch.""" pem_cert = b"-----BEGIN CERTIFICATE-----\nTEST_CERT\n-----END CERTIFICATE-----\n" wrong_hash = b"wrong_hash_value" - + trust_manager = _HederaTrustManager(wrong_hash, verify_certificate=True) - + with pytest.raises(ValueError, match="Failed to confirm the server's certificate"): trust_manager.check_server_trusted(pem_cert) @@ -65,7 +68,7 @@ def test_trust_manager_check_server_trusted_mismatched_hash(): def test_trust_manager_check_server_trusted_no_verification(): """Test certificate validation skipped when verification disabled.""" pem_cert = b"-----BEGIN CERTIFICATE-----\nTEST_CERT\n-----END CERTIFICATE-----\n" - + trust_manager = _HederaTrustManager(None, verify_certificate=False) # Should not raise even without cert hash assert trust_manager.check_server_trusted(pem_cert) is True @@ -88,8 +91,7 @@ def test_trust_manager_normalize_hash_lowercase(): def test_trust_manager_normalize_hash_unicode_decode_error(): """Test hash normalization handles Unicode decode errors.""" # Create bytes that can't be decoded as UTF-8 - cert_hash = bytes([0xff, 0xfe, 0xfd]) + cert_hash = bytes([0xFF, 0xFE, 0xFD]) trust_manager = _HederaTrustManager(cert_hash, verify_certificate=True) # Should fall back to hex encoding assert trust_manager.cert_hash == cert_hash.hex().lower() - diff --git a/tests/unit/key_utils_test.py b/tests/unit/key_utils_test.py index 821dbcec8..a44d1bbb8 100644 --- a/tests/unit/key_utils_test.py +++ b/tests/unit/key_utils_test.py @@ -14,10 +14,10 @@ def test_key_to_proto_with_ed25519_public_key(): """Tests key_to_proto with an Ed25519 PublicKey.""" 
private_key = PrivateKey.generate_ed25519() public_key = private_key.public_key() - + expected_proto = public_key._to_proto() result_proto = key_to_proto(public_key) - + assert result_proto == expected_proto assert isinstance(result_proto, basic_types_pb2.Key) @@ -26,10 +26,10 @@ def test_key_to_proto_with_ecdsa_public_key(): """Tests key_to_proto with an ECDSA PublicKey.""" private_key = PrivateKey.generate_ecdsa() public_key = private_key.public_key() - + expected_proto = public_key._to_proto() result_proto = key_to_proto(public_key) - + assert result_proto == expected_proto assert isinstance(result_proto, basic_types_pb2.Key) @@ -38,13 +38,13 @@ def test_key_to_proto_with_ed25519_private_key(): """Tests key_to_proto with an Ed25519 PrivateKey (extracts public key).""" private_key = PrivateKey.generate_ed25519() public_key = private_key.public_key() - + # We expect the *public key's* proto, even though we passed a private key expected_proto = public_key._to_proto() - + # Call the function with the PrivateKey result_proto = key_to_proto(private_key) - + # Assert it correctly converted it to the public key proto assert result_proto == expected_proto assert isinstance(result_proto, basic_types_pb2.Key) @@ -54,10 +54,10 @@ def test_key_to_proto_with_ecdsa_private_key(): """Tests key_to_proto with an ECDSA PrivateKey (extracts public key).""" private_key = PrivateKey.generate_ecdsa() public_key = private_key.public_key() - + expected_proto = public_key._to_proto() result_proto = key_to_proto(private_key) - + assert result_proto == expected_proto assert isinstance(result_proto, basic_types_pb2.Key) @@ -72,7 +72,7 @@ def test_key_to_proto_with_invalid_string_raises_error(): """Tests key_to_proto raises TypeError with invalid input.""" with pytest.raises(TypeError) as e: key_to_proto("this is not a key") - + assert "Key must be of type PrivateKey or PublicKey" in str(e.value) @@ -80,12 +80,11 @@ def test_key_type_alias(): """Tests that the Key type alias works 
correctly.""" private_key = PrivateKey.generate_ed25519() public_key = private_key.public_key() - + # Test that both PrivateKey and PublicKey can be assigned to Key type key1: Key = private_key key2: Key = public_key - + # Both should work with key_to_proto assert key_to_proto(key1) is not None assert key_to_proto(key2) is not None - diff --git a/tests/unit/keys_private_test.py b/tests/unit/keys_private_test.py index 84ee8e2e1..d1f141d66 100644 --- a/tests/unit/keys_private_test.py +++ b/tests/unit/keys_private_test.py @@ -10,6 +10,7 @@ pytestmark = pytest.mark.unit + def test_generate_ed25519(): """ Test generating an Ed25519 key, then: @@ -136,7 +137,7 @@ def test_from_string_der_ed25519(): """ Test from_string_der with a known valid DER encoding for Ed25519. Then confirm sign/verify works. - + This example DER was built using a known Ed25519 seed (all '01'). """ der_hex = ( @@ -319,11 +320,7 @@ def test_from_bytes_ambiguity_prefers_ecdsa_when_ed25519_fails(monkeypatch): ecdsa_scalar_one = (1).to_bytes(32, "big") # 2) Force the Ed25519 loader to always return None - monkeypatch.setattr( - PrivateKey, - "_try_load_ed25519", - staticmethod(lambda b: None) - ) + monkeypatch.setattr(PrivateKey, "_try_load_ed25519", staticmethod(lambda b: None)) # 3) Now from_bytes should skip Ed25519 and succeed with ECDSA with warnings.catch_warnings(record=True) as w: @@ -387,12 +384,15 @@ def test_from_string_ecdsa_strips_0x(): assert priv.is_ecdsa() -@pytest.mark.parametrize("fn, length", [ - (PrivateKey.from_bytes_ed25519, 31), - (PrivateKey.from_bytes_ed25519, 33), - (PrivateKey.from_bytes_ecdsa, 31), - (PrivateKey.from_bytes_ecdsa, 33), -]) +@pytest.mark.parametrize( + "fn, length", + [ + (PrivateKey.from_bytes_ed25519, 31), + (PrivateKey.from_bytes_ed25519, 33), + (PrivateKey.from_bytes_ecdsa, 31), + (PrivateKey.from_bytes_ecdsa, 33), + ], +) def test_from_bytes_wrong_length(fn, length): bad = b"\x00" * length with pytest.raises(ValueError): @@ -434,7 +434,7 @@ def 
test_repr_contains_full_hex(key_type): def test_der_roundtrip(key_type): """ Make sure that if we serialize a key to DER and then load it back, - we get the same raw seed/scalar. + we get the same raw seed/scalar. """ priv1 = PrivateKey.generate(key_type) der_hex = priv1.to_string_der() diff --git a/tests/unit/keys_public_test.py b/tests/unit/keys_public_test.py index 0ff2ceca4..a1ba9bed3 100644 --- a/tests/unit/keys_public_test.py +++ b/tests/unit/keys_public_test.py @@ -11,6 +11,7 @@ pytestmark = pytest.mark.unit + @pytest.fixture def ed25519_keypair(): """Returns (private_key, public_key) for Ed25519.""" @@ -18,6 +19,7 @@ def ed25519_keypair(): public = private.public_key() return private, public + @pytest.fixture def ecdsa_keypair(): """Returns (private_key, public_key) for ECDSA with secp256k1.""" @@ -43,10 +45,7 @@ def test_from_bytes_ed25519_valid(ed25519_keypair): # The loader emits a warning because a 32-byte blob could also be # an Ed25519 *private* seed - with pytest.warns( - UserWarning, - match="cannot distinguish Ed25519 private seeds" - ): + with pytest.warns(UserWarning, match="cannot distinguish Ed25519 private seeds"): # Attempt to construct a PublicKey wrapper from the raw bytes pubk = PublicKey.from_bytes_ed25519(raw_bytes) @@ -63,6 +62,7 @@ def test_from_bytes_ed25519_wrong_length(): with pytest.raises(ValueError, match="must be 32 bytes"): PublicKey.from_bytes_ed25519(data) + def test_from_bytes_ed25519_private_seed(ed25519_keypair): """ Demonstrate that from_bytes_ed25519 cannot tell a private seed apart @@ -87,6 +87,7 @@ def test_from_bytes_ed25519_private_seed(ed25519_keypair): # Round-tripping back to raw bytes returns the same 32 bytes (the seed) assert pk.to_bytes_ed25519() == seed + # ------------------------------------------------------------------------------ # Test: from_bytes_ecdsa # ------------------------------------------------------------------------------ @@ -96,7 +97,7 @@ def 
test_from_bytes_ecdsa_compressed_valid(ecdsa_keypair): # Serialize the public key into its SEC1 compressed form (33 bytes): compressed = pub.public_bytes( encoding=serialization.Encoding.X962, - format=serialization.PublicFormat.CompressedPoint, #Compressed + format=serialization.PublicFormat.CompressedPoint, # Compressed ) assert len(compressed) == 33 @@ -116,7 +117,7 @@ def test_from_bytes_ecdsa_uncompressed_valid(ecdsa_keypair): # Serialize the public key into its SEC1 uncompressed form (65 bytes): uncompressed = pub.public_bytes( encoding=serialization.Encoding.X962, - format=serialization.PublicFormat.UncompressedPoint, #Uncompressed + format=serialization.PublicFormat.UncompressedPoint, # Uncompressed ) assert len(uncompressed) == 65 @@ -134,12 +135,14 @@ def test_from_bytes_ecdsa_uncompressed_valid(ecdsa_keypair): # And the total length of the compressed point must be 33 bytes assert len(compressed) == 33 + def test_from_bytes_ecdsa_wrong_length(): # 32 bytes is not a valid ECDSA public point data = b"\x02" + b"\x00" * 31 with pytest.raises(ValueError, match="must be 33 or 65 bytes"): PublicKey.from_bytes_ecdsa(data) + def test_from_bytes_ecdsa_invalid(): # 33 bytes but invalid prefix or data # 0x05 is not a valid secp256k1 prefix (should be 0x02 or 0x03 if compressed) @@ -147,6 +150,7 @@ def test_from_bytes_ecdsa_invalid(): with pytest.raises(ValueError, match="Invalid ECDSA public key bytes"): PublicKey.from_bytes_ecdsa(data) + # ------------------------------------------------------------------------------ # Test: from_der # ------------------------------------------------------------------------------ @@ -190,6 +194,7 @@ def test_from_der_ecdsa(ecdsa_keypair): # Converting back to DER should match the original exactly assert pk.to_bytes_der() == der + def test_from_der_unsupported_curve(): """ Ensure that DER-encoded keys on curves other than secp256k1 @@ -216,7 +221,7 @@ def test_from_der_invalid(): in from_der(). 
""" # Create a bogus DER-like blob - der = b"\x30\x82" + b"\xFF" * 50 + der = b"\x30\x82" + b"\xff" * 50 # Expect a ValueError indicating failure to parse DER public key with pytest.raises(ValueError, match="Could not parse DER public key"): @@ -315,6 +320,7 @@ def test_from_bytes_invalid(): with pytest.raises(ValueError, match="Failed to load public key"): PublicKey.from_bytes(data) + # ------------------------------------------------------------------------------ # Test: from_string_xxx # ------------------------------------------------------------------------------ @@ -326,12 +332,13 @@ def test_from_string_ed25519(ed25519_keypair): """ _, pub = ed25519_keypair raw = pub.public_bytes( - encoding=serialization.Encoding.Raw, - format=serialization.PublicFormat.Raw + encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw ) hex_str = raw.hex() - with pytest.warns(UserWarning, match="cannot distinguish Ed25519 private seeds from public keys"): + with pytest.warns( + UserWarning, match="cannot distinguish Ed25519 private seeds from public keys" + ): pubk = PublicKey.from_string_ed25519(hex_str) assert pubk.is_ed25519() assert pubk.to_string_ed25519() == hex_str @@ -387,6 +394,7 @@ def test_from_string_ecdsa_invalid_hex(): with pytest.raises(ValueError, match="Invalid hex string for ECDSA public key"): PublicKey.from_string_ecdsa("not-a-hex") + def test_from_string_catch_all_ecdsa(ecdsa_keypair): _, pub = ecdsa_keypair compressed = pub.public_bytes( @@ -432,8 +440,7 @@ def test_from_string_catch_all_ed25519(ed25519_keypair): """ _, pub = ed25519_keypair raw = pub.public_bytes( - encoding=serialization.Encoding.Raw, - format=serialization.PublicFormat.Raw + encoding=serialization.Encoding.Raw, format=serialization.PublicFormat.Raw ) hex_str = raw.hex() @@ -441,6 +448,7 @@ def test_from_string_catch_all_ed25519(ed25519_keypair): pubk = PublicKey.from_string(hex_str) assert pubk.is_ed25519() + # 
------------------------------------------------------------------------------ # Test: _from_proto # ------------------------------------------------------------------------------ @@ -450,29 +458,32 @@ def test_from_proto_ed25519(ed25519_keypair): proto = pubk._to_proto() assert pubk._from_proto(proto).to_bytes_raw() == pubk.to_bytes_raw() + def test_from_proto_ecdsa(ecdsa_keypair): _, pub = ecdsa_keypair pubk = PublicKey(pub) proto = pubk._to_proto() assert pubk._from_proto(proto).to_bytes_raw() == pubk.to_bytes_raw() + def test_from_proto_unsupported_type(): # Create a Key proto with an unsupported type proto = Key() # Set some arbitrary bytes to a RSA_3072 as currently we do not support it proto.RSA_3072 = b"currently unsupported" - + # Verify that attempting to parse an unsupported key type raises ValueError with pytest.raises(ValueError, match="Unsupported public key type in protobuf"): PublicKey._from_proto(proto) + # ------------------------------------------------------------------------------ # Test: _to_proto # ------------------------------------------------------------------------------ def test_to_proto_ed25519(ed25519_keypair): - _, pub = ed25519_keypair + _, pub = ed25519_keypair pubk = PublicKey(pub) - + # Convert to the protobuf Key message proto = pubk._to_proto() # Ensure the oneof field named “key” is set to the ed25519 variant @@ -482,9 +493,9 @@ def test_to_proto_ed25519(ed25519_keypair): def test_to_proto_ecdsa(ecdsa_keypair): - _, pub = ecdsa_keypair + _, pub = ecdsa_keypair pubk = PublicKey(pub) - + # Convert to the protobuf Key message proto = pubk._to_proto() # Ensure the oneof field named “key” is set to the ECDSA_secp256k1 variant @@ -492,6 +503,7 @@ def test_to_proto_ecdsa(ecdsa_keypair): # The bytes in the proto should exactly match the compressed secp256k1 bytes assert proto.ECDSA_secp256k1 == pubk.to_bytes_ecdsa() + # ------------------------------------------------------------------------------ # Test: verify signatures # 
------------------------------------------------------------------------------ @@ -537,6 +549,7 @@ def test_verify_ecdsa_success(ecdsa_keypair): # If the signature is correct, verify() returns None and raises no error. pk.verify(signature, msg) + def test_verify_ecdsa_fail(ecdsa_keypair): priv, pub = ecdsa_keypair pk = PublicKey(pub) @@ -563,6 +576,7 @@ def test_repr_ed25519(ed25519_keypair): # It must include the raw public-key hex string assert pubk.to_string_raw() in r + def test_repr_ecdsa(ecdsa_keypair): _, pub = ecdsa_keypair pubk = PublicKey(pub) @@ -572,6 +586,7 @@ def test_repr_ecdsa(ecdsa_keypair): # It must include the raw public-key hex string assert pubk.to_string_raw() in r + def test_to_evm_address_ecdsa_key(ecdsa_keypair): """Test that the evm_address is created.""" _, pub = ecdsa_keypair @@ -583,6 +598,7 @@ def test_to_evm_address_ecdsa_key(ecdsa_keypair): assert isinstance(evm_address, EvmAddress) assert len(evm_address.address_bytes) == 20 + def test_to_evm_address_from_ecdsa_key_manual_derivation(ecdsa_keypair): """Verify that to_evm_address() matches manual derivation.""" _, pub = ecdsa_keypair @@ -594,7 +610,8 @@ def test_to_evm_address_from_ecdsa_key_manual_derivation(ecdsa_keypair): derived_bytes = public_key.to_evm_address().address_bytes - assert evm_bytes== derived_bytes + assert evm_bytes == derived_bytes + def test_to_evm_address_with_same_ecdsa_key(ecdsa_keypair): """Test deriving EVM address from a valid same ECDSA public key.""" @@ -613,6 +630,7 @@ def test_to_evm_address_with_same_ecdsa_key(ecdsa_keypair): assert evm_addr1 == evm_addr2 + def test_to_evm_address_raises_for_ed25519(ed25519_keypair): """Ensure ValueError is raised when deriving EVM address from Ed25519 key.""" _, pub = ed25519_keypair diff --git a/tests/unit/logger_test.py b/tests/unit/logger_test.py index 8a4f9b1d5..247686e7f 100644 --- a/tests/unit/logger_test.py +++ b/tests/unit/logger_test.py @@ -5,6 +5,7 @@ pytestmark = pytest.mark.unit + def test_set_level(): 
"""Test that changing log level affects what will be logged.""" logger = Logger(LogLevel.DEBUG, "test_logger") @@ -16,7 +17,7 @@ def test_get_level(): """Test getting the current log level.""" logger = Logger(level=LogLevel.DEBUG) assert logger.get_level() == LogLevel.DEBUG - + logger.set_level(LogLevel.ERROR) assert logger.get_level() == LogLevel.ERROR @@ -25,7 +26,7 @@ def test_logger_creation(): logger = Logger(LogLevel.DEBUG, "test_logger") assert logger.name == "test_logger" assert logger.level == LogLevel.DEBUG - + def test_logger_creation_from_env(): os.environ["LOG_LEVEL"] = "CRITICAL" @@ -35,23 +36,23 @@ def test_logger_creation_from_env(): def test_logger_output(capsys): """Test that logger outputs the expected messages to stdout. - + This test uses pytest's capsys fixture to capture the actual log output, allowing verification of the exact content written to stdout by the logger. """ # Create a logger that logs to the captured stdout with UNIQUE name logger = Logger(LogLevel.TRACE, "test_logger_output") - + # Log messages at different levels with key-value pairs logger.trace("trace message", "traceKey", "traceValue") logger.debug("debug message", "debugKey", "debugValue") logger.info("info message", "infoKey", "infoValue") logger.warning("warning message", "warningKey", "warningValue") logger.error("error message", "errorKey", "errorValue") - + # Get the captured output captured = capsys.readouterr() - + # Verify that each message appears in the output assert "trace message: traceKey = traceValue" in captured.out assert "debug message: debugKey = debugValue" in captured.out @@ -63,7 +64,7 @@ def test_logger_output(capsys): logger.error("this should not appear") captured = capsys.readouterr() assert captured.out == "" - + # Test re-enabling logging logger.set_silent(False) logger.info("this should appear") @@ -73,27 +74,27 @@ def test_logger_output(capsys): def test_logger_respects_level(capsys): """Test that logger only outputs messages at or above its 
level. - + Uses pytest's capsys fixture to verify that log filtering works correctly by examining which messages actually appear in the captured output based on the configured log level. """ # Create a logger that logs to the captured stdout with UNIQUE name logger = Logger(LogLevel.INFO, "test_logger_respects_level") - + # These should not be logged logger.trace("trace message") logger.debug("debug message") - + # These should be logged logger.info("info message") logger.warning("warning message") logger.error("error message") - + # Get the captured output captured = capsys.readouterr() logger.info(captured.out) - + # Check that appropriate messages were logged or not logged assert "trace message" not in captured.out assert "debug message" not in captured.out diff --git a/tests/unit/mock_server.py b/tests/unit/mock_server.py index cdd1b6cce..50bee264d 100644 --- a/tests/unit/mock_server.py +++ b/tests/unit/mock_server.py @@ -20,13 +20,14 @@ ) from hiero_sdk_python.logger.log_level import LogLevel + class MockServer: """Mock gRPC server that returns predetermined responses.""" - + def __init__(self, responses): """ Initialize a mock gRPC server with predetermined responses. 
- + Args: responses (list): List of response objects to return in sequence """ @@ -34,75 +35,91 @@ def __init__(self, responses): self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) self.port = _find_free_port() self.address = f"localhost:{self.port}" - + self._register_services() - + # Start the server self.server.add_insecure_port(self.address) self.server.start() - + def _register_services(self): """Register all necessary gRPC services.""" # Create and register all servicers services = [ - (crypto_service_pb2_grpc.CryptoServiceServicer, - crypto_service_pb2_grpc.add_CryptoServiceServicer_to_server), - (token_service_pb2_grpc.TokenServiceServicer, - token_service_pb2_grpc.add_TokenServiceServicer_to_server), - (consensus_service_pb2_grpc.ConsensusServiceServicer, - consensus_service_pb2_grpc.add_ConsensusServiceServicer_to_server), - (network_service_pb2_grpc.NetworkServiceServicer, - network_service_pb2_grpc.add_NetworkServiceServicer_to_server), - (file_service_pb2_grpc.FileServiceServicer, - file_service_pb2_grpc.add_FileServiceServicer_to_server), - (smart_contract_service_pb2_grpc.SmartContractServiceServicer, - smart_contract_service_pb2_grpc.add_SmartContractServiceServicer_to_server), - (schedule_service_pb2_grpc.ScheduleServiceServicer, - schedule_service_pb2_grpc.add_ScheduleServiceServicer_to_server), - (util_service_pb2_grpc.UtilServiceServicer, - util_service_pb2_grpc.add_UtilServiceServicer_to_server), + ( + crypto_service_pb2_grpc.CryptoServiceServicer, + crypto_service_pb2_grpc.add_CryptoServiceServicer_to_server, + ), + ( + token_service_pb2_grpc.TokenServiceServicer, + token_service_pb2_grpc.add_TokenServiceServicer_to_server, + ), + ( + consensus_service_pb2_grpc.ConsensusServiceServicer, + consensus_service_pb2_grpc.add_ConsensusServiceServicer_to_server, + ), + ( + network_service_pb2_grpc.NetworkServiceServicer, + network_service_pb2_grpc.add_NetworkServiceServicer_to_server, + ), + ( + 
file_service_pb2_grpc.FileServiceServicer, + file_service_pb2_grpc.add_FileServiceServicer_to_server, + ), + ( + smart_contract_service_pb2_grpc.SmartContractServiceServicer, + smart_contract_service_pb2_grpc.add_SmartContractServiceServicer_to_server, + ), + ( + schedule_service_pb2_grpc.ScheduleServiceServicer, + schedule_service_pb2_grpc.add_ScheduleServiceServicer_to_server, + ), + ( + util_service_pb2_grpc.UtilServiceServicer, + util_service_pb2_grpc.add_UtilServiceServicer_to_server, + ), ] - + for servicer_class, add_servicer_fn in services: servicer = self._create_mock_servicer(servicer_class) add_servicer_fn(servicer, self.server) - + def _create_mock_servicer(self, servicer_class): """ Create a mock servicer that returns predetermined responses. - + Args: servicer_class: The gRPC servicer class to mock - + Returns: A mock servicer object """ responses = self.responses - + class MockServicer(servicer_class): def __getattribute__(self, name): # Get special attributes normally - if name in ('_next_response', '__class__'): + if name in ("_next_response", "__class__"): return super().__getattribute__(name) - + def method_wrapper(request, context): nonlocal responses if not responses: # If no more responses are available, return None return None - + response = responses.pop(0) - + if isinstance(response, RealRpcError): # Abort with custom error context.abort(response.code(), response.details()) - + return response - + return method_wrapper - + return MockServicer() - + def close(self): """Stop the server.""" self.server.stop(0) @@ -111,21 +128,21 @@ def close(self): def _find_free_port(): """Find a free port on localhost.""" with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s: - s.bind(('', 0)) + s.bind(("", 0)) s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) return s.getsockname()[1] class RealRpcError(grpc.RpcError): """A more realistic gRPC error for testing.""" - + def __init__(self, status_code, details): self._status_code = 
status_code self._details = details - + def code(self): return self._status_code - + def details(self): return self._details @@ -134,29 +151,28 @@ def details(self): def mock_hedera_servers(response_sequences): """ Context manager that creates mock Hedera servers and a client. - + Args: response_sequences: List of response sequences, one for each mock server - + Yields: Client: The configured client """ # Create mock servers servers = [MockServer(responses) for responses in response_sequences] - + try: # Configure the network with mock servers nodes = [] for i, server in enumerate(servers): node = _Node(AccountId(0, 0, 3 + i), server.address, None) - + # force insecure transport node._apply_transport_security(False) node._set_verify_certificates(False) - + nodes.append(node) - # Create network and client network = Network(nodes=nodes) client = Client(network) @@ -165,7 +181,7 @@ def mock_hedera_servers(response_sequences): key = PrivateKey.generate() client.set_operator(AccountId(0, 0, 1800), key) client.max_attempts = 4 # Configure for testing - + yield client finally: # Clean up the servers diff --git a/tests/unit/network_test.py b/tests/unit/network_test.py new file mode 100644 index 000000000..6d7de27e7 --- /dev/null +++ b/tests/unit/network_test.py @@ -0,0 +1,449 @@ +import time +import pytest +from unittest.mock import Mock, patch + +from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.address_book.node_address import NodeAddress +from hiero_sdk_python.client.network import Network +from hiero_sdk_python.node import _Node + +pytestmark = pytest.mark.unit + + +@pytest.fixture(autouse=True) +def mock_network_nodes(monkeypatch): + """Helper to mock fetch_node_from_mirror_nodes apply to all instead of making mirror rest call""" + fake_nodes = [ + _Node(AccountId(0, 0, 3), "127.0.0.1:50211", NodeAddress()), + _Node(AccountId(0, 0, 4), "127.0.0.1:50212", NodeAddress()), + _Node(AccountId(0, 0, 5), "127.0.0.1:50212", NodeAddress()), + ] + 
+ def fake_fetch_nodes(self): + return fake_nodes + + monkeypatch.setattr(Network, "_fetch_nodes_from_mirror_node", fake_fetch_nodes) + + return fake_nodes + + +# Tests _readmit_nodes +def test_readmit_nodes_returns_early(monkeypatch): + """ + Test _readmit_nodes returns immediately if _earliest_readmit_time has not passed yet. + """ + network = Network("testnet") + now = 1000.0 + monkeypatch.setattr(time, "monotonic", lambda: now) + + network._earliest_readmit_time = now + 10 + network._healthy_nodes = [] + network._readmit_nodes() + + assert network._healthy_nodes == [] + + +def test_readmit_nodes_adds_expired_node(monkeypatch): + """ + Test readmit_nodes adds a node whose backoff period has expired to the healthy nodes list. + """ + network = Network("testnet") + + now = 1000.0 + monkeypatch.setattr(time, "monotonic", lambda: now) + + # Node ready to be readmitted + node = Mock(spec=_Node) + node._readmit_time = now - 1 + + network.nodes = [node] + network._healthy_nodes = [] + + network._earliest_readmit_time = 0 + + network._readmit_nodes() + + assert node in network._healthy_nodes + assert network._earliest_readmit_time >= now + + +def test_readmit_nodes_skips_unexpired_node(monkeypatch): + """ + Test _readmit_nodes does not add nodes whose backoff period has not yet expired. 
+ """ + network = Network("testnet") + + now = 1000.0 + monkeypatch.setattr(time, "monotonic", lambda: now) + + # Node not ready to be readmitted + node = Mock(spec=_Node) + node._readmit_time = now + 50 + + network.nodes = [node] + network._healthy_nodes = [] + network._earliest_readmit_time = 0 + + network._readmit_nodes() + + assert node not in network._healthy_nodes + + expected_delay = max(network._node_min_readmit_period, node._readmit_time - now) + expected_delay = min(expected_delay, network._node_max_readmit_period) + + assert network._earliest_readmit_time == now + expected_delay + + +def test_readmit_nodes_updates_earliest_readmit_time(monkeypatch): + """ + Test _readmit_nodes correctly calculates _earliest_readmit_time based + on multiple nodes with different _readmit_time values and the configured + min/max readmit periods. + """ + network = Network("testnet") + + now = 1000.0 + monkeypatch.setattr(time, "monotonic", lambda: now) + + node_ready = Mock(spec=_Node) + node_ready._readmit_time = now - 5 # ready to be readmitted + + node_not_ready = Mock(spec=_Node) + node_not_ready._readmit_time = now + 20 # not ready yet + + network.nodes = [node_ready, node_not_ready] + network._healthy_nodes = [] + network._earliest_readmit_time = 0 + network._node_min_readmit_period = 8 + network._node_max_readmit_period = 60 + + network._readmit_nodes() + + # Only ready node is added + assert node_ready in network._healthy_nodes + assert node_not_ready not in network._healthy_nodes + + # _earliest_readmit_time should reflect the next node's readmit time with min/max applied + expected_delay = min( + network._node_max_readmit_period, + max(network._node_min_readmit_period, node_not_ready._readmit_time - now), + ) + assert network._earliest_readmit_time == now + expected_delay + + +def test_readmit_nodes_does_not_duplicate_healthy_nodes(monkeypatch): + """Test that the _readmit_nodes method does not add duplicate nodes.""" + network = Network("testnet") + + now = 1000.0 
+ monkeypatch.setattr(time, "monotonic", lambda: now) + + node = Mock(spec=_Node) + node._readmit_time = now - 10 + + network.nodes = [node] + network._healthy_nodes = [node] + network._earliest_readmit_time = 0 + + network._readmit_nodes() + + assert network._healthy_nodes.count(node) == 1 + + +# Tests _increase_backoff +def test_increase_backoff_removes_node_from_healthy(): + """ + Test _increase_backoff calls _increase_backoff on the node and removes it from the healthy nodes list. + """ + network = Network("testnet") + + # Mock node + node = Mock(spec=_Node) + network.nodes = [node] + network._healthy_nodes = [node] + + # Call the method + network._increase_backoff(node) + + # Assert node's _increase_backoff was called + node._increase_backoff.assert_called_once() + + # Node should be removed from healthy nodes + assert node not in network._healthy_nodes + + +def test_increase_backoff_type_error_for_invalid_input(): + """ + Test _increase_backoff raises TypeError if the argument is not of type _Node. + """ + network = Network("testnet") + + invalid_values = [None, True, object, 123, "node", [], {}] + + for invalid in invalid_values: + with pytest.raises(TypeError, match="node must be of type _Node"): + network._increase_backoff(invalid) + + +def test_increase_backoff_does_not_affect_other_nodes(): + """ + Test _increase_backoff only affects the target node and does not remove other nodes from healthy_nodes. 
+ """ + network = Network("testnet") + + node1 = Mock(spec=_Node) + node2 = Mock(spec=_Node) + network.nodes = [node1, node2] + network._healthy_nodes = [node1, node2] + + network._increase_backoff(node1) + + # node1 removed + assert node1 not in network._healthy_nodes + # node2 still in healthy_nodes + assert node2 in network._healthy_nodes + # node1's _increase_backoff called + node1._increase_backoff.assert_called_once() + # node2's _increase_backoff not called + node2._increase_backoff.assert_not_called() + + +# Tests _decrease_backoff +def test_decrease_backoff_calls_node_method(): + """ + Test _decrease_backoff calls _decrease_backoff on the target node. + """ + network = Network("testnet") + + node = Mock(spec=_Node) + network.nodes = [node] + network._healthy_nodes = [node] + + # Call the method + network._decrease_backoff(node) + + # Assert node's _decrease_backoff was called + node._decrease_backoff.assert_called_once() + + # Node should remain in healthy_nodes (unlike _increase_backoff) + assert node in network._healthy_nodes + + +def test_decrease_backoff_type_error_for_invalid_input(): + """ + Test _decrease_backoff raises TypeError if the argument is not of type _Node. + """ + network = Network("testnet") + + invalid_values = [None, 123, True, object, "node", [], {}] + + for invalid in invalid_values: + with pytest.raises(TypeError, match="node must be of type _Node"): + network._decrease_backoff(invalid) + + +def test_decrease_backoff_does_not_affect_other_nodes(): + """ + Test _decrease_backoff only affects the target node and does not call _decrease_backoff on other nodes. 
+ """ + network = Network("testnet") + + node1 = Mock(spec=_Node) + node2 = Mock(spec=_Node) + network.nodes = [node1, node2] + network._healthy_nodes = [node1, node2] + + network._decrease_backoff(node1) + + # node1's _decrease_backoff called + node1._decrease_backoff.assert_called_once() + # node2's _decrease_backoff not called + node2._decrease_backoff.assert_not_called() + + # Both nodes remain in healthy_nodes + assert node1 in network._healthy_nodes + assert node2 in network._healthy_nodes + + +# Test set_network_nodes +def test_set_network_nodes_with_explicit_nodes(): + """ + Test _set_network_nodes uses explicitly provided nodes and marks healthy ones. + """ + network = Network("testnet") + + # mock nodes + node1 = Mock(spec=_Node) + node2 = Mock(spec=_Node) + + node1.is_healthy.return_value = True + node2.is_healthy.return_value = False + + network._set_network_nodes([node1, node2]) + + assert network.nodes == [node1, node2] + assert network._healthy_nodes == [node1] + + +def test_set_network_nodes_resets_healthy_nodes(): + """ + Test _set_network_nodes clears previously healthy nodes. + """ + network = Network("testnet") + + old_node = Mock(spec=_Node) + network._healthy_nodes = [old_node] + + new_node = Mock(spec=_Node) + new_node.is_healthy.return_value = True + + network._set_network_nodes([new_node]) + + assert old_node not in network._healthy_nodes + assert network._healthy_nodes == [new_node] + + +# Test select_node +def test_select_node_round_robin(): + """Test that _select_node cycles through healthy nodes using round-robin selection.""" + network = Network("testnet") + + node1 = Mock(spec=_Node) + node2 = Mock(spec=_Node) + + network._healthy_nodes = [node1, node2] + network._node_index = 0 + + assert network._select_node() is node2 + assert network._select_node() is node1 + + +def test_select_node_raises_when_no_healthy_nodes(): + """ + Test _select_node raises ValueError if no healthy nodes exist. 
+ """ + network = Network("testnet") + network._healthy_nodes = [] + + with pytest.raises(ValueError, match="No healthy node available"): + network._select_node() + + +# Test get_node +def test_get_node_by_account_id(): + """ + Test _get_node returns node matching account ID. + """ + network = Network("testnet") + + node = _Node(AccountId(0, 0, 3), "127.0.0.1:8080", None) + + network._healthy_nodes = [node] + + with patch( + "hiero_sdk_python.client.network.Network._readmit_nodes" + ) as mock_readmit: + result = network._get_node(AccountId(0, 0, 3)) + + assert mock_readmit.call_count == 1 + assert result._account_id == node._account_id + + +def test_get_node_returns_none_when_not_found(): + """ + Test _get_node returns None if no matching node exists. + """ + network = Network("testnet") + + node = Mock(spec=_Node) + node._account_id = AccountId(0, 0, 3) + network._healthy_nodes = [node] + + assert network._get_node("0.0.999") is None + + +# Tests parse_mirror_address +@pytest.mark.parametrize( + "mirror_addr,expected_host,expected_port", + [ + ("localhost:5551", "localhost", 5551), + ("127.0.0.1:8080", "127.0.0.1", 8080), + ("mirror.hedera.com:443", "mirror.hedera.com", 443), + ("justhost", "justhost", 443), # no port defaults to 443 + ("badport:abc", "badport", 443), # invalid port defaults to 443 + ], +) +def test_parse_mirror_address(mirror_addr, expected_host, expected_port): + """Test that _parse_mirror_address correctly splits mirror_address into host and port.""" + network = Network("testnet", mirror_address=mirror_addr) + host, port = network._parse_mirror_address() + assert host == expected_host + assert port == expected_port + + +# Tests _determine_scheme_and_port +@pytest.mark.parametrize( + "host,port,expected_scheme,expected_port", + [ + ("localhost", 443, "http", 8080), + ("127.0.0.1", 80, "http", 80), + ("127.0.0.1", 5000, "http", 5000), + ("hedera.com", 5600, "https", 443), + ("hedera.com", 443, "https", 443), + ("hedera.com", 8443, "https", 8443), 
+ ], +) +def test_determine_scheme_and_port(host, port, expected_scheme, expected_port): + """Test that _determine_scheme_and_port correctly computes the scheme (http/https).""" + network = Network("testnet") + scheme, out_port = network._determine_scheme_and_port(host, port) + assert out_port == expected_port + assert scheme == expected_scheme + + +# Tests for _build_rest_url +@pytest.mark.parametrize( + "scheme,host,port,expected_url", + [ + ("https", "hedera.com", 443, "https://hedera.com/api/v1"), + ("https", "hedera.com", 8443, "https://hedera.com:8443/api/v1"), + ("http", "localhost", 80, "http://localhost/api/v1"), + ("http", "localhost", 8080, "http://localhost:8080/api/v1"), + ], +) +def test_build_rest_url(scheme, host, port, expected_url): + """Test that _build_rest_url constructs the correct REST API URL.""" + network = Network("testnet") + url = network._build_rest_url(scheme, host, port) + assert url == expected_url + + +def test_get_mirror_rest_url_fallback(): + """Test get_mirror_rest_url fallback behavior when network is not in MIRROR_NODE_URLS.""" + # Custom network with no entry in MIRROR_NODE_URLS + network = Network("customnet", mirror_address="localhost:1234") + + scheme, port = network._determine_scheme_and_port(*network._parse_mirror_address()) + expected_url = network._build_rest_url(scheme, "localhost", port) + + assert network.get_mirror_rest_url() == expected_url + + +@pytest.mark.unit +def test_resolve_nodes_fallback_to_default(monkeypatch): + """Test that _resolve_nodes falls back to DEFAULT_NODES if no nodes are provided and mirror fetch returns empty.""" + network_name = "testnet" + network = Network(network_name) + + # Patch _fetch_nodes_from_mirror_node to return empty list + monkeypatch.setattr(network, "_fetch_nodes_from_mirror_node", lambda: []) + + # Call _resolve_nodes with nodes=None should fallback to DEFAULT_NODES + resolved_nodes = network._resolve_nodes(None) + + # DEFAULT_NODES for testnet has 4 entries (0..3) + 
expected_count = len(network.DEFAULT_NODES[network_name]) + assert isinstance(resolved_nodes, list) + assert all(isinstance(n, _Node) for n in resolved_nodes) + assert len(resolved_nodes) == expected_count + assert resolved_nodes[0]._account_id == network.DEFAULT_NODES[network_name][0][1] diff --git a/tests/unit/network_tls_test.py b/tests/unit/network_tls_test.py index 17fa486f1..455c9d7ab 100644 --- a/tests/unit/network_tls_test.py +++ b/tests/unit/network_tls_test.py @@ -1,4 +1,5 @@ """Unit tests for TLS configuration in Network and Client.""" + import pytest from src.hiero_sdk_python.client.client import Client from src.hiero_sdk_python.client.network import Network @@ -10,42 +11,49 @@ def test_network_tls_enabled_by_default_for_hosted_networks(): """Test that TLS is enabled by default for hosted networks.""" - for network_name in ('mainnet', 'testnet', 'previewnet'): + for network_name in ("mainnet", "testnet", "previewnet"): network = Network(network_name) - assert network.is_transport_security() is True, f"TLS should be enabled for {network_name}" + assert ( + network.is_transport_security() is True + ), f"TLS should be enabled for {network_name}" def test_network_tls_disabled_by_default_for_local_networks(): """Test that TLS is disabled by default for local networks.""" - for network_name in ('solo', 'localhost', 'local'): + for network_name in ("solo", "localhost", "local"): network = Network(network_name) - assert network.is_transport_security() is False, f"TLS should be disabled for {network_name}" + assert ( + network.is_transport_security() is False + ), f"TLS should be disabled for {network_name}" def test_network_tls_disabled_by_default_for_custom_networks(): """Test that TLS is disabled by default for custom networks.""" # Provide nodes for custom network since it has no defaults from src.hiero_sdk_python.node import _Node + nodes = [_Node(AccountId(0, 0, 3), "127.0.0.1:50211", None)] - network = Network('custom-network', nodes=nodes) + network = 
Network("custom-network", nodes=nodes) assert network.is_transport_security() is False def test_network_verification_enabled_by_default(): """Test that certificate verification is enabled by default for all networks.""" - for network_name in ('mainnet', 'testnet', 'previewnet', 'solo', 'localhost'): + for network_name in ("mainnet", "testnet", "previewnet", "solo", "localhost"): network = Network(network_name) - assert network.is_verify_certificates() is True, f"Verification should be enabled for {network_name}" + assert ( + network.is_verify_certificates() is True + ), f"Verification should be enabled for {network_name}" def test_network_set_transport_security_enable(): """Test enabling TLS on network.""" - network = Network('solo') # Starts with TLS disabled + network = Network("solo") # Starts with TLS disabled assert network.is_transport_security() is False - + network.set_transport_security(True) assert network.is_transport_security() is True - + # Verify all nodes are updated for node in network.nodes: assert node._address._is_transport_security() is True @@ -53,12 +61,12 @@ def test_network_set_transport_security_enable(): def test_network_set_transport_security_disable(): """Test disabling TLS on network.""" - network = Network('testnet') # Starts with TLS enabled + network = Network("testnet") # Starts with TLS enabled assert network.is_transport_security() is True - + network.set_transport_security(False) assert network.is_transport_security() is False - + # Verify all nodes are updated for node in network.nodes: assert node._address._is_transport_security() is False @@ -66,25 +74,25 @@ def test_network_set_transport_security_disable(): def test_network_set_transport_security_idempotent(): """Test that setting TLS to same value is idempotent.""" - network = Network('testnet') + network = Network("testnet") initial_state = network.is_transport_security() - + # Set to same value multiple times network.set_transport_security(initial_state) 
network.set_transport_security(initial_state) network.set_transport_security(initial_state) - + assert network.is_transport_security() == initial_state def test_network_set_verify_certificates(): """Test setting certificate verification.""" - network = Network('testnet') + network = Network("testnet") assert network.is_verify_certificates() is True - + network.set_verify_certificates(False) assert network.is_verify_certificates() is False - + # Verify all nodes are updated for node in network.nodes: assert node._verify_certificates is False @@ -92,23 +100,23 @@ def test_network_set_verify_certificates(): def test_network_set_verify_certificates_idempotent(): """Test that setting verification to same value is idempotent.""" - network = Network('testnet') + network = Network("testnet") initial_state = network.is_verify_certificates() - + network.set_verify_certificates(initial_state) network.set_verify_certificates(initial_state) - + assert network.is_verify_certificates() == initial_state def test_network_set_tls_root_certificates(): """Test setting custom root certificates.""" - network = Network('testnet') + network = Network("testnet") custom_certs = b"-----BEGIN CERTIFICATE-----\nCUSTOM\n-----END CERTIFICATE-----\n" - + network.set_tls_root_certificates(custom_certs) assert network.get_tls_root_certificates() == custom_certs - + # Verify all nodes are updated for node in network.nodes: assert node._root_certificates == custom_certs @@ -116,77 +124,79 @@ def test_network_set_tls_root_certificates(): def test_network_set_tls_root_certificates_none(): """Test clearing custom root certificates.""" - network = Network('testnet') + network = Network("testnet") custom_certs = b"custom" network.set_tls_root_certificates(custom_certs) - + network.set_tls_root_certificates(None) assert network.get_tls_root_certificates() is None def test_client_set_transport_security(): """Test Client.set_transport_security() method.""" - network = Network('solo') + network = 
Network("solo") client = Client(network) - + assert client.is_transport_security() is False client.set_transport_security(True) assert client.is_transport_security() is True - + # Should return self for chaining assert client.set_transport_security(False) is client def test_client_set_verify_certificates(): """Test Client.set_verify_certificates() method.""" - network = Network('testnet') + network = Network("testnet") client = Client(network) - + assert client.is_verify_certificates() is True client.set_verify_certificates(False) assert client.is_verify_certificates() is False - + # Should return self for chaining assert client.set_verify_certificates(True) is client def test_client_set_tls_root_certificates(): """Test Client.set_tls_root_certificates() method.""" - network = Network('testnet') + network = Network("testnet") client = Client(network) custom_certs = b"custom_certs" - + client.set_tls_root_certificates(custom_certs) assert client.get_tls_root_certificates() == custom_certs def test_network_get_mirror_rest_url_hosted_networks(): """Test REST URL generation for hosted networks.""" - for network_name in ('mainnet', 'testnet', 'previewnet'): + for network_name in ("mainnet", "testnet", "previewnet"): network = Network(network_name) url = network.get_mirror_rest_url() - assert url.startswith('https://') - assert url.endswith('/api/v1') + assert url.startswith("https://") + assert url.endswith("/api/v1") # Should not include :443 for default HTTPS port - assert ':443' not in url + assert ":443" not in url def test_network_get_mirror_rest_url_localhost(): """Test REST URL generation for localhost.""" - network = Network('solo') + network = Network("solo") url = network.get_mirror_rest_url() - # Solo uses http://localhost:8080 - assert 'http://' in url or 'https://' in url - assert url.endswith('/api/v1') + # Solo uses http://localhost:5551 + assert "http://" in url or "https://" in url + assert url.endswith("/api/v1") def 
test_network_get_mirror_rest_url_custom_port(): """Test REST URL generation with custom port for network without MIRROR_NODE_URLS entry.""" # Use a custom network that doesn't have MIRROR_NODE_URLS entry from src.hiero_sdk_python.node import _Node + nodes = [_Node(AccountId(0, 0, 3), "127.0.0.1:50211", None)] - network = Network('custom-network', nodes=nodes, mirror_address='custom.mirror.com:8443') + network = Network( + "custom-network", nodes=nodes, mirror_address="custom.mirror.com:8443" + ) url = network.get_mirror_rest_url() # Should use custom mirror_address and include port - assert url.startswith('https://custom.mirror.com:8443/api/v1') - + assert url.startswith("https://custom.mirror.com:8443/api/v1") diff --git a/tests/unit/nft_id_test.py b/tests/unit/nft_id_test.py index 53d6ef0cd..b084da45c 100644 --- a/tests/unit/nft_id_test.py +++ b/tests/unit/nft_id_test.py @@ -5,42 +5,54 @@ from hiero_sdk_python.hapi.services import basic_types_pb2 pytestmark = pytest.mark.unit + + def test_nft_id(): - #return true + # return true nftid_constructor_tokenid = TokenId(shard=0, realm=1, num=2) - nftid_constructor_test = NftId(token_id=nftid_constructor_tokenid, serial_number=1234) + nftid_constructor_test = NftId( + token_id=nftid_constructor_tokenid, serial_number=1234 + ) assert str(nftid_constructor_test) == "0.1.2/1234" - assert repr(nftid_constructor_test) == "NftId(token_id=TokenId(shard=0, realm=1, num=2, checksum=None), serial_number=1234)" + assert ( + repr(nftid_constructor_test) + == "NftId(token_id=TokenId(shard=0, realm=1, num=2, checksum=None), serial_number=1234)" + ) assert nftid_constructor_test._to_proto().__eq__( basic_types_pb2.NftID( token_ID=basic_types_pb2.TokenID(shardNum=0, realmNum=1, tokenNum=2), - serial_number=1234) + serial_number=1234, + ) ) assert NftId._from_proto( nft_id_proto=basic_types_pb2.NftID( token_ID=basic_types_pb2.TokenID(shardNum=0, realmNum=1, tokenNum=2), - serial_number=1234 + serial_number=1234, ) 
).__eq__(nftid_constructor_test) - #return false + # return false with pytest.raises(TypeError): nftid_failed_constructor_tokenid1 = TokenId(shard=0, realm=1, num="A") with pytest.raises(TypeError): nftid_failed_constructor_tokenid = TokenId(shard=0, realm="b", num=1) with pytest.raises(TypeError): - nftid_failed_constructor_tokenid = TokenId(shard='c', realm=1, num=1) + nftid_failed_constructor_tokenid = TokenId(shard="c", realm=1, num=1) with pytest.raises(TypeError): nftid_failed_constructor = NftId(token_id=None, serial_number=1234) with pytest.raises(TypeError): nftid_failed_constructor = NftId(token_id=1234, serial_number=1234) with pytest.raises(TypeError): - nftid_failed_constructor = NftId(token_id=TokenId(shard=0, realm=1, num=0), serial_number="asdfasdfasdf") + nftid_failed_constructor = NftId( + token_id=TokenId(shard=0, realm=1, num=0), serial_number="asdfasdfasdf" + ) with pytest.raises(ValueError): - nftid_failed_constructor = NftId(token_id=TokenId(shard=0, realm=1, num=0), serial_number=-1234) + nftid_failed_constructor = NftId( + token_id=TokenId(shard=0, realm=1, num=0), serial_number=-1234 + ) - #don't need to test protobuf cause its final and type checked + # don't need to test protobuf cause its final and type checked with pytest.raises(ValueError): NftId.from_string("") @@ -66,6 +78,7 @@ def test_nft_id(): with pytest.raises(ValueError): NftId.from_string(fail_str) + def test_get_nft_id_with_checksum(mock_client): """Should return string with checksum when ledger id is provided.""" client = mock_client @@ -74,4 +87,4 @@ def test_get_nft_id_with_checksum(mock_client): token_id = TokenId.from_string("0.0.1") nft_id = NftId(token_id, 1) - assert nft_id.to_string_with_checksum(client) == "0.0.1-dfkxr/1" \ No newline at end of file + assert nft_id.to_string_with_checksum(client) == "0.0.1-dfkxr/1" diff --git a/tests/unit/node_address_test.py b/tests/unit/node_address_test.py index 9191bc9c2..db361893e 100644 --- a/tests/unit/node_address_test.py 
+++ b/tests/unit/node_address_test.py @@ -3,16 +3,24 @@ from hiero_sdk_python.account.account_id import AccountId from hiero_sdk_python.address_book.endpoint import Endpoint from hiero_sdk_python.address_book.node_address import NodeAddress -from hiero_sdk_python.hapi.services.basic_types_pb2 import NodeAddress as NodeAddressProto +from hiero_sdk_python.hapi.services.basic_types_pb2 import ( + NodeAddress as NodeAddressProto, +) + pytestmark = pytest.mark.unit + def test_init(): """Test initialization of _NodeAddress.""" # Create test data account_id = AccountId(0, 0, 123) - addresses = [Endpoint(address=bytes("192.168.1.1", 'utf-8'), port=8080, domain_name="example.com")] - cert_hash = b'sample-cert-hash' - + addresses = [ + Endpoint( + address=bytes("192.168.1.1", "utf-8"), port=8080, domain_name="example.com" + ) + ] + cert_hash = b"sample-cert-hash" + # Initialize _NodeAddress node_address = NodeAddress( public_key="sample-public-key", @@ -20,9 +28,9 @@ def test_init(): node_id=1234, cert_hash=cert_hash, addresses=addresses, - description="Sample Node" + description="Sample Node", ) - + # Assert properties are set correctly assert node_address._public_key == "sample-public-key" assert node_address._account_id == account_id @@ -31,30 +39,35 @@ def test_init(): assert node_address._addresses == addresses assert node_address._description == "Sample Node" + def test_string_representation(): """Test string representation of _NodeAddress.""" # Create AccountId account_id = AccountId(0, 0, 123) - - # Create - endpoint = Endpoint(address=bytes("192.168.1.1", 'utf-8'), port=8080, domain_name="example.com") - + + # Create + endpoint = Endpoint( + address=bytes("192.168.1.1", "utf-8"), port=8080, domain_name="example.com" + ) + # Create NodeAddress node_address = NodeAddress( public_key="sample-public-key", account_id=account_id, node_id=1234, - cert_hash=b'sample-cert-hash', + cert_hash=b"sample-cert-hash", addresses=[endpoint], - description="Sample Node" + 
description="Sample Node", ) - + # Get string representation result = str(node_address) - + # Check if expected fields are in the result assert "NodeAccountId: 0.0.123" in result - assert "CertHash: 73616d706c652d636572742d68617368" in result # hex representation of sample-cert-hash + assert ( + "CertHash: 73616d706c652d636572742d68617368" in result + ) # hex representation of sample-cert-hash assert "NodeId: 1234" in result assert "PubKey: sample-public-key" in result @@ -63,9 +76,15 @@ def test_to_proto(): """Test conversion of NodeAddress to protobuf with endpoints.""" account_id = AccountId(0, 0, 123) endpoints = [ - Endpoint(address=bytes("192.168.1.1", "utf-8"), port=8080, domain_name="example1.com"), - Endpoint(address=bytes("192.168.1.2", "utf-8"), port=8081, domain_name="example2.com"), - Endpoint(address=bytes("192.168.1.3", "utf-8"), port=8082, domain_name="example3.com"), + Endpoint( + address=bytes("192.168.1.1", "utf-8"), port=8080, domain_name="example1.com" + ), + Endpoint( + address=bytes("192.168.1.2", "utf-8"), port=8081, domain_name="example2.com" + ), + Endpoint( + address=bytes("192.168.1.3", "utf-8"), port=8082, domain_name="example3.com" + ), ] node_address = NodeAddress( public_key="sample-public-key", @@ -73,7 +92,7 @@ def test_to_proto(): node_id=1234, cert_hash=b"sample-cert-hash", addresses=endpoints, - description="Sample Node" + description="Sample Node", ) node_address_proto = node_address._to_proto() @@ -146,9 +165,7 @@ def test_from_proto(): """Test creation of NodeAddress from protobuf with endpoint.""" account_id_proto = AccountId(0, 0, 123)._to_proto() endpoint_proto = Endpoint( - address=bytes("192.168.1.1", "utf-8"), - port=8080, - domain_name="example.com" + address=bytes("192.168.1.1", "utf-8"), port=8080, domain_name="example.com" )._to_proto() # Create NodeAddressProto @@ -178,9 +195,7 @@ def test_round_trip(): """Ensure NodeAddress → Proto → NodeAddress round trip works.""" account_id = AccountId(0, 0, 123) endpoint = 
Endpoint( - address=bytes("192.168.1.1", "utf-8"), - port=8080, - domain_name="example.com" + address=bytes("192.168.1.1", "utf-8"), port=8080, domain_name="example.com" ) # Create NodeAddress @@ -190,7 +205,7 @@ def test_round_trip(): node_id=1234, cert_hash=b"sample-cert-hash", addresses=[endpoint], - description="Sample Node" + description="Sample Node", ) # Convert to proto @@ -220,12 +235,13 @@ def test_empty_addresses(): node_id=1234, cert_hash=b"sample-cert-hash", addresses=[], - description="No endpoints" + description="No endpoints", ) proto = node_address._to_proto() assert len(proto.serviceEndpoint) == 0 + def test_to_proto_none_account_id(): """Test _to_proto handles None account_id gracefully.""" node_address = NodeAddress( @@ -234,10 +250,10 @@ def test_to_proto_none_account_id(): node_id=1234, cert_hash=b"sample-cert-hash", addresses=[], - description="No account" + description="No account", ) proto = node_address._to_proto() # Should not have nodeAccountId set - assert not proto.HasField('nodeAccountId') + assert not proto.HasField("nodeAccountId") diff --git a/tests/unit/node_create_transaction_test.py b/tests/unit/node_create_transaction_test.py index b2d109012..3068cb2e5 100644 --- a/tests/unit/node_create_transaction_test.py +++ b/tests/unit/node_create_transaction_test.py @@ -104,7 +104,8 @@ def test_build_transaction_body_with_valid_parameters(mock_account_ids, node_par ) assert len(node_create.service_endpoint) == 1 assert ( - node_create.service_endpoint[0] == node_params["service_endpoints"][0]._to_proto() + node_create.service_endpoint[0] + == node_params["service_endpoints"][0]._to_proto() ) assert node_create.gossip_ca_certificate == node_params["gossip_ca_certificate"] assert node_create.grpc_certificate_hash == node_params["grpc_certificate_hash"] @@ -139,7 +140,8 @@ def test_build_scheduled_body(node_params): ) assert len(node_create.service_endpoint) == 1 assert ( - node_create.service_endpoint[0] == 
node_params["service_endpoints"][0]._to_proto() + node_create.service_endpoint[0] + == node_params["service_endpoints"][0]._to_proto() ) assert node_create.gossip_ca_certificate == node_params["gossip_ca_certificate"] assert node_create.grpc_certificate_hash == node_params["grpc_certificate_hash"] diff --git a/tests/unit/node_test.py b/tests/unit/node_test.py new file mode 100644 index 000000000..f82335363 --- /dev/null +++ b/tests/unit/node_test.py @@ -0,0 +1,87 @@ +import time +import pytest + +from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.address_book.endpoint import Endpoint +from hiero_sdk_python.address_book.node_address import NodeAddress +from hiero_sdk_python.node import _Node + +pytestmark = pytest.mark.unit + + +@pytest.fixture +def mock_address_book(): + """Create a mock address book with certificate hash.""" + cert_hash = b"test_cert_hash_12345" + endpoint = Endpoint( + address=b"node.example.com", port=50211, domain_name="node.example.com" + ) + address_book = NodeAddress( + account_id=AccountId(0, 0, 3), cert_hash=cert_hash, addresses=[endpoint] + ) + return address_book + + +@pytest.fixture +def node(mock_address_book): + """Create a node with deterministic value for unit tests.""" + node = _Node(AccountId(0, 0, 3), "127.0.0.1:50211", mock_address_book) + return node + + +# Test is_healthy +def test_is_healthy_when_readmit_time_in_past(node): + """Test that a node is healthy if readmit time is in the past.""" + node._readmit_time = time.monotonic() - 10 + assert node.is_healthy() is True + + +def test_is_healthy_when_readmit_time_in_future(node): + """Test that a node is unhealthy if readmit time is in the future.""" + node._readmit_time = time.monotonic() + 10 + assert node.is_healthy() is False + + +# Test increase_backoff +def test_increase_backoff_doubles_value(node): + """Test that _increase_backoff doubles the current backoff.""" + node._current_backoff = 10 + + node._increase_backoff() + assert 
node._current_backoff == 20 + assert node._readmit_time > time.monotonic() + + +def test_increase_backoff_caps_at_max(node): + """Test that _increase_backoff does not exceed the maximum backoff.""" + node._current_backoff = node._max_backoff + + node._increase_backoff() + assert node._current_backoff == node._max_backoff + + +def test_increase_backoff_updates_readmit_time(node): + """Test that _increase_backoff updates the readmit time correctly.""" + node._current_backoff = 10 + + before = time.monotonic() + node._increase_backoff() + assert node._readmit_time > before + 10 + + +# Test decrease_backoff +def test_decrease_backoff_halves_value(node): + """Test that _decrease_backoff halves the current backoff.""" + node._current_backoff = 20 + + node._decrease_backoff() + assert node._current_backoff == 10 + + +def test_decrease_backoff_floors_at_min(node): + """Test that _decrease_backoff does not go below the minimum backoff.""" + node._current_backoff = node._min_backoff + + node._decrease_backoff() + assert node._current_backoff == node._min_backoff + diff --git a/tests/unit/node_tls_test.py b/tests/unit/node_tls_test.py index 1898ba79d..3fb979e5b 100644 --- a/tests/unit/node_tls_test.py +++ b/tests/unit/node_tls_test.py @@ -1,4 +1,5 @@ """Unit tests for TLS functionality in _Node.""" + import hashlib import ssl from unittest.mock import Mock, patch, MagicMock @@ -15,11 +16,11 @@ def mock_address_book(): """Create a mock address book with certificate hash.""" cert_hash = b"test_cert_hash_12345" - endpoint = Endpoint(address=b"node.example.com", port=50212, domain_name="node.example.com") + endpoint = Endpoint( + address=b"node.example.com", port=50212, domain_name="node.example.com" + ) address_book = NodeAddress( - account_id=AccountId(0, 0, 3), - cert_hash=cert_hash, - addresses=[endpoint] + account_id=AccountId(0, 0, 3), cert_hash=cert_hash, addresses=[endpoint] ) return address_book @@ -30,9 +31,7 @@ def mock_address_book_no_domain(): cert_hash = 
b"test_cert_hash_12345" endpoint = Endpoint(address=b"127.0.0.1", port=50212, domain_name=None) address_book = NodeAddress( - account_id=AccountId(0, 0, 3), - cert_hash=cert_hash, - addresses=[endpoint] + account_id=AccountId(0, 0, 3), cert_hash=cert_hash, addresses=[endpoint] ) return address_book @@ -53,7 +52,7 @@ def test_node_apply_transport_security_enable(mock_node_without_address_book): """Test enabling TLS on a node.""" node = mock_node_without_address_book assert node._address._is_transport_security() is False - + node._apply_transport_security(True) assert node._address._is_transport_security() is True assert node._address._get_port() == 50212 @@ -65,7 +64,7 @@ def test_node_apply_transport_security_disable(mock_node_with_address_book): # Start with TLS enabled node._apply_transport_security(True) assert node._address._is_transport_security() is True - + node._apply_transport_security(False) assert node._address._is_transport_security() is False assert node._address._get_port() == 50211 @@ -75,7 +74,7 @@ def test_node_apply_transport_security_idempotent(mock_node_without_address_book """Test that applying same TLS state is idempotent.""" node = mock_node_without_address_book initial_port = node._address._get_port() - + node._apply_transport_security(False) # Already disabled assert node._address._get_port() == initial_port @@ -85,14 +84,17 @@ def test_node_apply_transport_security_closes_channel(mock_node_with_address_boo node = mock_node_with_address_book # Disable verification to skip certificate fetching node._verify_certificates = False - + # Create a channel first - with patch('grpc.secure_channel') as mock_secure, patch.object(node, "_fetch_server_certificate_pem", return_value=b"dummy-cert"): + with ( + patch("grpc.secure_channel") as mock_secure, + patch.object(node, "_fetch_server_certificate_pem", return_value=b"dummy-cert"), + ): mock_channel = Mock() mock_secure.return_value = mock_channel node._get_channel() assert node._channel is not None - 
+ # Apply transport security change node._apply_transport_security(False) # Channel should be closed @@ -103,7 +105,7 @@ def test_node_set_verify_certificates(mock_node_with_address_book): """Test setting certificate verification on node.""" node = mock_node_with_address_book assert node._verify_certificates is True - + node._set_verify_certificates(False) assert node._verify_certificates is False @@ -112,49 +114,51 @@ def test_node_set_verify_certificates_idempotent(mock_node_with_address_book): """Test that setting verification to same value is idempotent.""" node = mock_node_with_address_book initial_state = node._verify_certificates - + node._set_verify_certificates(initial_state) node._set_verify_certificates(initial_state) - + assert node._verify_certificates == initial_state def test_node_build_channel_options_with_hostname_not_override(): """Test channel options include hostname override when domain differs from address.""" - endpoint = Endpoint(address=b"127.0.0.1", port=50212, domain_name="node.example.com") + endpoint = Endpoint( + address=b"127.0.0.1", port=50212, domain_name="node.example.com" + ) address_book = NodeAddress( - account_id=AccountId(0, 0, 3), - cert_hash=b"hash", - addresses=[endpoint] + account_id=AccountId(0, 0, 3), cert_hash=b"hash", addresses=[endpoint] ) node = _Node(AccountId(0, 0, 3), "127.0.0.1:50212", address_book) - + options = node._build_channel_options() assert options is not None - assert ('grpc.ssl_target_name_override', 'node.example.com') not in options + assert ("grpc.ssl_target_name_override", "node.example.com") not in options def test_node_build_channel_options_no_override_when_same(): """Test channel options don't include override when hostname matches address.""" - endpoint = Endpoint(address=b"node.example.com", port=50212, domain_name="node.example.com") + endpoint = Endpoint( + address=b"node.example.com", port=50212, domain_name="node.example.com" + ) address_book = NodeAddress( - account_id=AccountId(0, 0, 3), 
- cert_hash=b"hash", - addresses=[endpoint] + account_id=AccountId(0, 0, 3), cert_hash=b"hash", addresses=[endpoint] ) node = _Node(AccountId(0, 0, 3), "node.example.com:50212", address_book) - + options = node._build_channel_options() assert options == [ ("grpc.default_authority", "127.0.0.1"), ("grpc.ssl_target_name_override", "127.0.0.1"), ("grpc.keepalive_time_ms", 100000), ("grpc.keepalive_timeout_ms", 10000), - ("grpc.keepalive_permit_without_calls", 1) + ("grpc.keepalive_permit_without_calls", 1), ] -def test_node_build_channel_options_override_localhost_without_address_book(mock_node_without_address_book): +def test_node_build_channel_options_override_localhost_without_address_book( + mock_node_without_address_book, +): """Test channel options don't include override without address book.""" node = mock_node_without_address_book options = node._build_channel_options() @@ -163,26 +167,33 @@ def test_node_build_channel_options_override_localhost_without_address_book(mock ("grpc.ssl_target_name_override", "127.0.0.1"), ("grpc.keepalive_time_ms", 100000), ("grpc.keepalive_timeout_ms", 10000), - ("grpc.keepalive_permit_without_calls", 1) + ("grpc.keepalive_permit_without_calls", 1), ] -@patch('socket.create_connection') -@patch('ssl.create_default_context') -def test_node_fetch_server_certificate_pem(mock_ssl_context, mock_socket_conn, mock_node_with_address_book): +@patch("socket.create_connection") +@patch("ssl.create_default_context") +def test_node_fetch_server_certificate_pem( + mock_ssl_context, mock_socket_conn, mock_node_with_address_book +): """Test fetching server certificate in PEM format.""" node = mock_node_with_address_book - + # Mock SSL context and socket mock_context = MagicMock() mock_ssl_context.return_value = mock_context - mock_context.wrap_socket.return_value.__enter__.return_value.getpeercert.return_value = b"DER_CERT" - + mock_context.wrap_socket.return_value.__enter__.return_value.getpeercert.return_value = ( + b"DER_CERT" + ) + mock_sock 
= MagicMock() mock_socket_conn.return_value.__enter__.return_value = mock_sock - + # Mock DER to PEM conversion - with patch('ssl.DER_cert_to_PEM_cert', return_value="-----BEGIN CERTIFICATE-----\nPEM\n-----END CERTIFICATE-----\n"): + with patch( + "ssl.DER_cert_to_PEM_cert", + return_value="-----BEGIN CERTIFICATE-----\nPEM\n-----END CERTIFICATE-----\n", + ): pem_cert = node._fetch_server_certificate_pem() assert isinstance(pem_cert, bytes) assert b"BEGIN CERTIFICATE" in pem_cert @@ -192,16 +203,16 @@ def test_node_validate_tls_certificate_with_trust_manager(mock_node_with_address """Test certificate validation using trust manager.""" node = mock_node_with_address_book node._verify_certificates = True - + # Mock certificate fetching pem_cert = b"-----BEGIN CERTIFICATE-----\nTEST\n-----END CERTIFICATE-----\n" cert_hash = hashlib.sha384(pem_cert).digest().hex().lower() - + # Update address book with matching hash - node._address_book._cert_hash = cert_hash.encode('utf-8') + node._address_book._cert_hash = cert_hash.encode("utf-8") node._node_pem_cert = pem_cert - - with patch.object(node, '_fetch_server_certificate_pem', return_value=pem_cert): + + with patch.object(node, "_fetch_server_certificate_pem", return_value=pem_cert): # Should not raise node._validate_tls_certificate_with_trust_manager() @@ -210,12 +221,12 @@ def test_node_validate_tls_certificate_hash_mismatch(mock_node_with_address_book """Test certificate validation raises error on hash mismatch.""" node = mock_node_with_address_book node._verify_certificates = True - + pem_cert = b"-----BEGIN CERTIFICATE-----\nTEST\n-----END CERTIFICATE-----\n" wrong_hash = b"wrong_hash" node._address_book._cert_hash = wrong_hash node._node_pem_cert = pem_cert - + with pytest.raises(ValueError, match="Failed to confirm the server's certificate"): node._validate_tls_certificate_with_trust_manager() @@ -224,7 +235,7 @@ def test_node_validate_tls_certificate_no_verification(mock_node_with_address_bo """Test certificate 
validation skipped when verification disabled.""" node = mock_node_with_address_book node._verify_certificates = False - + # Should not raise even without proper setup node._validate_tls_certificate_with_trust_manager() @@ -233,58 +244,68 @@ def test_node_validate_tls_certificate_no_address_book(): """Test certificate validation skips when verification enabled but no address book.""" node = _Node(AccountId(0, 0, 3), "127.0.0.1:50212", None) node._verify_certificates = True - + # Validation should skip (not raise) when no address book is available # This allows unit tests to work without address books while still enabling # verification in production where address books are available. node._validate_tls_certificate_with_trust_manager() # Should not raise -@patch('grpc.secure_channel') -@patch('grpc.insecure_channel') -def test_node_get_channel_secure(mock_insecure, mock_secure, mock_node_with_address_book): +@patch("grpc.secure_channel") +@patch("grpc.insecure_channel") +def test_node_get_channel_secure( + mock_insecure, mock_secure, mock_node_with_address_book +): """Test channel creation for secure connection.""" node = mock_node_with_address_book node._address = node._address._to_secure() # Ensure TLS is enabled - - with patch.object(node, "_fetch_server_certificate_pem", return_value=b"dummy-cert"): + + with patch.object( + node, "_fetch_server_certificate_pem", return_value=b"dummy-cert" + ): mock_channel = Mock() mock_secure.return_value = mock_channel - + # Skip certificate validation for this test node._verify_certificates = False - + channel = node._get_channel() - + mock_secure.assert_called_once() mock_insecure.assert_not_called() assert channel is not None -@patch('grpc.secure_channel') -@patch('grpc.insecure_channel') -def test_node_get_channel_insecure(mock_insecure, mock_secure, mock_node_without_address_book): +@patch("grpc.secure_channel") +@patch("grpc.insecure_channel") +def test_node_get_channel_insecure( + mock_insecure, mock_secure, 
mock_node_without_address_book +): """Test channel creation for insecure connection.""" node = mock_node_without_address_book - + mock_channel = Mock() mock_insecure.return_value = mock_channel - + channel = node._get_channel() - + mock_insecure.assert_called_once() mock_secure.assert_not_called() assert channel is not None -@patch('grpc.secure_channel') -@patch('grpc.insecure_channel') -def test_node_get_channel_reuses_existing(mock_insecure, mock_secure, mock_node_with_address_book): +@patch("grpc.secure_channel") +@patch("grpc.insecure_channel") +def test_node_get_channel_reuses_existing( + mock_insecure, mock_secure, mock_node_with_address_book +): """Test that channel is reused when already created.""" node = mock_node_with_address_book node._verify_certificates = False - - with patch.object(node, "_fetch_server_certificate_pem", return_value=b"dummy-cert"): + + with patch.object( + node, "_fetch_server_certificate_pem", return_value=b"dummy-cert" + ): mock_channel = Mock() mock_secure.return_value = mock_channel @@ -300,7 +321,7 @@ def test_node_set_root_certificates(mock_node_with_address_book): """Test setting root certificates on node.""" node = mock_node_with_address_book custom_certs = b"custom_root_certs" - + node._set_root_certificates(custom_certs) assert node._root_certificates == custom_certs @@ -309,29 +330,37 @@ def test_node_set_root_certificates_closes_channel(mock_node_with_address_book): """Test that setting root certificates closes existing channel.""" node = mock_node_with_address_book node._verify_certificates = False - - with patch('grpc.secure_channel') as mock_secure, patch.object(node, "_fetch_server_certificate_pem", return_value=b"dummy-cert"): - + + with ( + patch("grpc.secure_channel") as mock_secure, + patch.object(node, "_fetch_server_certificate_pem", return_value=b"dummy-cert"), + ): + mock_channel = Mock() mock_secure.return_value = mock_channel node._get_channel() assert node._channel is not None - + 
node._set_root_certificates(b"certs") # Channel should be closed to force recreation assert node._channel is None -def test_secure_connect_raise_error_if_no_certificate_is_available(mock_node_without_address_book): + +def test_secure_connect_raise_error_if_no_certificate_is_available( + mock_node_without_address_book, +): """Test get channel raise error if no certificate available if transport security true.""" node = mock_node_without_address_book node._apply_transport_security(True) - + with pytest.raises(ValueError, match="No certificate available."): node._get_channel() @patch("grpc.secure_channel") -def test_node_get_channel_with_root_certificates(mock_secure, mock_node_with_address_book): +def test_node_get_channel_with_root_certificates( + mock_secure, mock_node_with_address_book +): """Test secure channel uses provided root certificates.""" node = mock_node_with_address_book node._address = node._address._to_secure() @@ -355,6 +384,7 @@ def test_node_get_channel_with_root_certificates(mock_secure, mock_node_with_add mock_fetch.assert_not_called() assert channel is not None + @pytest.mark.parametrize( "cert_hash, expected", [ @@ -363,7 +393,7 @@ def test_node_get_channel_with_root_certificates(mock_secure, mock_node_with_add (b"0xABCDEF1234", "abcdef1234"), (b" AbCdEf ", "abcdef"), (b"abcdef123456", "abcdef123456"), - (b"\xff\xfe\xfd\xfc", "fffefdfc") + (b"\xff\xfe\xfd\xfc", "fffefdfc"), ], ) def test_normalize_cert_hash(cert_hash, expected): @@ -371,6 +401,7 @@ def test_normalize_cert_hash(cert_hash, expected): result = _Node._normalize_cert_hash(cert_hash) assert result == expected + def test_validate_tls_skipped_when_not_secure(mock_node_with_address_book): """Test skip validate_certificate when insrcure connection is use""" node = mock_node_with_address_book @@ -381,6 +412,7 @@ def test_validate_tls_skipped_when_not_secure(mock_node_with_address_book): # Should return early and NOT raise node._validate_tls_certificate_with_trust_manager() + 
@patch("socket.create_connection") @patch("ssl.create_default_context") def test_fetch_server_certificate_legacy_tls_path(mock_ssl_context, mock_socket): @@ -405,4 +437,4 @@ def test_fetch_server_certificate_legacy_tls_path(mock_ssl_context, mock_socket) # Assert legacy flags applied assert mock_context.options & ssl.OP_NO_TLSv1 - assert mock_context.options & ssl.OP_NO_TLSv1_1 \ No newline at end of file + assert mock_context.options & ssl.OP_NO_TLSv1_1 diff --git a/tests/unit/node_update_transaction_test.py b/tests/unit/node_update_transaction_test.py index d47be2ff9..d6b092dd1 100644 --- a/tests/unit/node_update_transaction_test.py +++ b/tests/unit/node_update_transaction_test.py @@ -109,10 +109,15 @@ def test_build_transaction_body(mock_account_ids, node_params): ) assert len(node_update.service_endpoint) == 1 assert ( - node_update.service_endpoint[0] == node_params["service_endpoints"][0]._to_proto() + node_update.service_endpoint[0] + == node_params["service_endpoints"][0]._to_proto() + ) + assert ( + node_update.gossip_ca_certificate.value == node_params["gossip_ca_certificate"] + ) + assert ( + node_update.grpc_certificate_hash.value == node_params["grpc_certificate_hash"] ) - assert node_update.gossip_ca_certificate.value == node_params["gossip_ca_certificate"] - assert node_update.grpc_certificate_hash.value == node_params["grpc_certificate_hash"] assert node_update.admin_key == node_params["admin_key"]._to_proto() assert node_update.decline_reward.value == node_params["decline_reward"] assert ( @@ -144,9 +149,16 @@ def test_build_scheduled_body(node_params): node_update.gossip_endpoint[0] == node_params["gossip_endpoints"][0]._to_proto() ) assert len(node_update.service_endpoint) == 1 - assert node_update.service_endpoint[0] == node_params["service_endpoints"][0]._to_proto() - assert node_update.gossip_ca_certificate.value == node_params["gossip_ca_certificate"] - assert node_update.grpc_certificate_hash.value == node_params["grpc_certificate_hash"] + 
assert ( + node_update.service_endpoint[0] + == node_params["service_endpoints"][0]._to_proto() + ) + assert ( + node_update.gossip_ca_certificate.value == node_params["gossip_ca_certificate"] + ) + assert ( + node_update.grpc_certificate_hash.value == node_params["grpc_certificate_hash"] + ) assert node_update.admin_key == node_params["admin_key"]._to_proto() assert node_update.decline_reward.value == node_params["decline_reward"] assert ( diff --git a/tests/unit/prng_transaction_test.py b/tests/unit/prng_transaction_test.py index ced8e1e3d..cd16f0691 100644 --- a/tests/unit/prng_transaction_test.py +++ b/tests/unit/prng_transaction_test.py @@ -162,4 +162,6 @@ def test_prng_transaction_can_execute(): receipt = transaction.execute(client) - assert receipt.status == ResponseCode.SUCCESS, "Transaction should have succeeded" + assert ( + receipt.status == ResponseCode.SUCCESS + ), "Transaction should have succeeded" diff --git a/tests/unit/query_nodes_test.py b/tests/unit/query_nodes_test.py index 6696be9a7..8c3465ab0 100644 --- a/tests/unit/query_nodes_test.py +++ b/tests/unit/query_nodes_test.py @@ -2,6 +2,7 @@ from hiero_sdk_python.query.query import Query from hiero_sdk_python.account.account_id import AccountId + def test_set_single_node_account_id(): q = Query() node = AccountId(0, 0, 3) @@ -11,6 +12,7 @@ def test_set_single_node_account_id(): assert q.node_account_ids == [node] assert q._used_node_account_id is None # not selected until execution + def test_set_multiple_node_account_ids(): q = Query() nodes = [AccountId(0, 0, 3), AccountId(0, 0, 4)] @@ -20,6 +22,7 @@ def test_set_multiple_node_account_ids(): assert q.node_account_ids == nodes assert q._used_node_account_id is None + def test_select_node_account_id(): q = Query() nodes = [AccountId(0, 0, 3), AccountId(0, 0, 4)] diff --git a/tests/unit/query_test.py b/tests/unit/query_test.py index 695782866..3f8ab911e 100644 --- a/tests/unit/query_test.py +++ b/tests/unit/query_test.py @@ -9,23 +9,32 @@ from 
hiero_sdk_python.query.token_info_query import TokenInfoQuery from hiero_sdk_python.response_code import ResponseCode from hiero_sdk_python.executable import _ExecutionState -from hiero_sdk_python.hapi.services import query_header_pb2, response_pb2, response_header_pb2, crypto_get_account_balance_pb2, token_get_info_pb2 +from hiero_sdk_python.hapi.services import ( + query_header_pb2, + response_pb2, + response_header_pb2, + crypto_get_account_balance_pb2, + token_get_info_pb2, +) from tests.unit.mock_server import mock_hedera_servers pytestmark = pytest.mark.unit + # By default we test query that doesn't require payment @pytest.fixture def query(): """Fixture for a query that doesn't require payment""" return CryptoGetAccountBalanceQuery() + @pytest.fixture def query_requires_payment(): """Fixture for a query that requires payment""" query = TokenInfoQuery() return query + def test_query_initialization(query): """Test Query initialization with default values""" assert isinstance(query.timestamp, int) @@ -34,25 +43,28 @@ def test_query_initialization(query): assert query.node_index == 0 assert query.payment_amount is None + def test_set_query_payment(query): """Test setting custom query payment""" payment = Hbar(2) result = query.set_query_payment(payment) - + assert result == query assert query.payment_amount == payment - + + def test_before_execute_payment_not_required(query, mock_client): """Test _before_execute method setup for query that doesn't require payment""" # payment_amount is None, should not set payment_amount query._before_execute(mock_client) - + # since node_account_ids is not set it will be empty # query internally use node form client assert query.node_account_ids == [] assert query.operator == mock_client.operator assert query.payment_amount is None + def test_before_execute_payment_required(query_requires_payment, mock_client): """Test _before_execute method setup for query that requires payment""" # get_cost() should return Hbar(2) @@ -60,7 
+72,7 @@ def test_before_execute_payment_required(query_requires_payment, mock_client): mock_get_cost.return_value = Hbar(2) query_requires_payment.get_cost = mock_get_cost query_requires_payment.set_max_query_payment(Hbar(3)) - + # payment_amount is None, should set payment_amount to 2 Hbars query_requires_payment._before_execute(mock_client) @@ -74,89 +86,119 @@ def test_before_execute_payment_required(query_requires_payment, mock_client): mock_get_cost.return_value = Hbar(1) query_requires_payment.get_cost = mock_get_cost query_requires_payment._before_execute(mock_client) - + assert query_requires_payment.payment_amount.to_tinybars() == Hbar(2).to_tinybars() + def test_request_header_no_fields_set(query): """Test combinations with no fields set""" header = query._make_request_header() - assert not header.HasField('payment'), "Payment field should not be present when no fields are set" - + assert not header.HasField( + "payment" + ), "Payment field should not be present when no fields are set" + + def test_request_header_payment_set(query, mock_client): """Test combinations with payment set""" # Test with only query payment set query.payment_amount = Hbar(1) header = query._make_request_header() - assert not header.HasField('payment'), "Payment field should not be present when only query payment is set" - + assert not header.HasField( + "payment" + ), "Payment field should not be present when only query payment is set" + # Test with query payment and operator set query.operator = mock_client.operator header = query._make_request_header() - assert not header.HasField('payment'), "Payment field should not be present when only operator and payment are set" + assert not header.HasField( + "payment" + ), "Payment field should not be present when only operator and payment are set" + def test_request_header_node_account_set(query, mock_client): """Test combinations with node account set""" # Test with just node account set query.node_account_id = 
mock_client.network.current_node._account_id - + header = query._make_request_header() - assert not header.HasField('payment'), "Payment field should not be present when only node account is set" + assert not header.HasField( + "payment" + ), "Payment field should not be present when only node account is set" # Test with node account and query payment set query.payment_amount = Hbar(1) header = query._make_request_header() - assert not header.HasField('payment'), "Payment field should not be present when only node account and payment are set" + assert not header.HasField( + "payment" + ), "Payment field should not be present when only node account and payment are set" + def test_request_header_operator_set(query, mock_client): """Test combinations with operator set""" # Test with just operator set query.operator = mock_client.operator - + header = query._make_request_header() - assert not header.HasField('payment'), "Payment field should not be present when only operator is set" + assert not header.HasField( + "payment" + ), "Payment field should not be present when only operator is set" # Test with operator and node account set query.node_account_id = mock_client.network.current_node._account_id - + header = query._make_request_header() - assert not header.HasField('payment'), "Payment field should not be present when only operator and node account are set" + assert not header.HasField( + "payment" + ), "Payment field should not be present when only operator and node account are set" + def test_request_header_payment_zero(query, mock_client): """Test that payment field is not present in request header when payment amount is 0""" # Set up operator and node account ID from mock client query.operator = mock_client.operator query.node_account_id = mock_client.network.current_node._account_id - + # Test with payment amount set to 0 Hbar query.payment_amount = Hbar(0) header = query._make_request_header() - assert not header.HasField('payment'), "Payment field should 
not be present when payment is set to 0" + assert not header.HasField( + "payment" + ), "Payment field should not be present when payment is set to 0" + def test_make_request_header_with_payment(query_requires_payment, mock_client): """Test making request header with payment transaction for queries that require payment""" query_requires_payment.operator = mock_client.operator - query_requires_payment.node_account_id = mock_client.network.current_node._account_id + query_requires_payment.node_account_id = ( + mock_client.network.current_node._account_id + ) query_requires_payment.set_query_payment(Hbar(1)) - + header = query_requires_payment._make_request_header() - + assert isinstance(header, query_header_pb2.QueryHeader) assert header.responseType == query_header_pb2.ResponseType.ANSWER_ONLY - assert header.HasField('payment'), "Payment field should be present when payment is set for queries that require payment" - + assert header.HasField( + "payment" + ), "Payment field should be present when payment is set for queries that require payment" + + def test_request_header_excludes_payment_for_free_query(query, mock_client): """Test that payment is not included in request header for queries that don't require payment""" query.operator = mock_client.operator query.node_account_id = mock_client.network.current_node._account_id # Set query payment to 1 Hbar query.set_query_payment(Hbar(1)) - + # Get header and verify payment was not included header = query._make_request_header() - + assert isinstance(header, query_header_pb2.QueryHeader) assert header.responseType == query_header_pb2.ResponseType.ANSWER_ONLY - assert not header.HasField('payment'), "Payment field should not be present for queries that don't require payment" + assert not header.HasField( + "payment" + ), "Payment field should not be present for queries that don't require payment" + def test_should_retry_retryable_statuses(query): """Test that retryable status codes trigger retry""" @@ -164,21 +206,22 @@ 
def test_should_retry_retryable_statuses(query): retryable_statuses = [ ResponseCode.PLATFORM_TRANSACTION_NOT_CREATED, ResponseCode.PLATFORM_NOT_ACTIVE, - ResponseCode.BUSY + ResponseCode.BUSY, ] - + for status in retryable_statuses: response = response_pb2.Response( - cryptogetAccountBalance=crypto_get_account_balance_pb2.CryptoGetAccountBalanceResponse( - header=response_header_pb2.ResponseHeader( - nodeTransactionPrecheckCode=status + cryptogetAccountBalance=crypto_get_account_balance_pb2.CryptoGetAccountBalanceResponse( + header=response_header_pb2.ResponseHeader( + nodeTransactionPrecheckCode=status + ) ) ) - ) - + result = query._should_retry(response) assert result == _ExecutionState.RETRY, f"Status {status} should trigger retry" + def test_should_retry_ok_status(query): """Test that OK status finishes execution""" response = response_pb2.Response( @@ -188,10 +231,11 @@ def test_should_retry_ok_status(query): ) ) ) - + result = query._should_retry(response) assert result == _ExecutionState.FINISHED + def test_should_retry_error_status(query): """Test that non-retryable error status triggers error state""" response = response_pb2.Response( @@ -201,10 +245,11 @@ def test_should_retry_error_status(query): ) ) ) - + result = query._should_retry(response) assert result == _ExecutionState.ERROR - + + def test_get_cost_when_payment_not_required(query, mock_client): """Test get_cost when payment is not required and is set or not set""" # Test without payment_amount @@ -216,35 +261,38 @@ def test_get_cost_when_payment_not_required(query, mock_client): result = query.get_cost(mock_client) assert result.to_tinybars() == Hbar(0).to_tinybars() + def test_get_cost_when_payment_required_and_set(query_requires_payment, mock_client): """Test get_cost when payment is required and set""" query_requires_payment.set_query_payment(Hbar(2)) result = query_requires_payment.get_cost(mock_client) assert result.to_tinybars() == Hbar(2).to_tinybars() - + + def 
test_get_cost_when_payment_required_and_not_set(query_requires_payment, token_id): """Test get_cost when payment is required and not set""" - + # Create mock response containing cost information (2 tinybars) for token info query response = response_pb2.Response( tokenGetInfo=token_get_info_pb2.TokenGetInfoResponse( header=response_header_pb2.ResponseHeader( nodeTransactionPrecheckCode=ResponseCode.OK, responseType=query_header_pb2.ResponseType.COST_ANSWER, - cost=2 + cost=2, ) ) ) - + response_sequences = [[response]] - + with mock_hedera_servers(response_sequences) as client: # Need to set token_id before getting cost, otherwise will fail query_requires_payment.set_token_id(token_id) result = query_requires_payment.get_cost(client) # Verify cost matches expected value of 2 tinybars assert result.to_tinybars() == 2 - + + def test_query_payment_requirement_defaults_to_true(query_requires_payment): """Test that the base Query class and payment-requiring queries default to requiring payment.""" query = Query() @@ -252,15 +300,16 @@ def test_query_payment_requirement_defaults_to_true(query_requires_payment): # Verify that payment-requiring query also defaults to requiring payment assert query_requires_payment._is_payment_required() == True + @pytest.mark.parametrize( - 'valid_amount,expected', + "valid_amount,expected", [ (1, Hbar(1)), (0.1, Hbar(0.1)), - (Decimal('0.1'), Hbar(Decimal('0.1'))), + (Decimal("0.1"), Hbar(Decimal("0.1"))), (Hbar(1), Hbar(1)), - (Hbar(0), Hbar(0)) - ] + (Hbar(0), Hbar(0)), + ], ) def test_set_max_query_payment_valid_param(query, valid_amount, expected): """Test that set_max_query_payment correctly converts various input types to Hbar.""" @@ -269,37 +318,39 @@ def test_set_max_query_payment_valid_param(query, valid_amount, expected): query.set_max_query_payment(valid_amount) assert query.max_query_payment == expected + @pytest.mark.parametrize( - 'negative_amount', - [-1, -0.1, Decimal('-0.1'), Decimal('-1'), Hbar(-1), Hbar(-0.2)] + 
"negative_amount", [-1, -0.1, Decimal("-0.1"), Decimal("-1"), Hbar(-1), Hbar(-0.2)] ) def test_set_max_query_payment_negative_value(query, negative_amount): """Test set_max_query_payment for negative amount values.""" with pytest.raises(ValueError, match="max_query_payment must be non-negative"): query.set_max_query_payment(negative_amount) -@pytest.mark.parametrize( - 'invalid_amount', - ['1', 'abc', True, False, None, object()] -) + +@pytest.mark.parametrize("invalid_amount", ["1", "abc", True, False, None, object()]) def test_set_max_query_payment_invalid_param(query, invalid_amount): """Test that set_max_query_payment raise error for invalid param.""" - with pytest.raises(TypeError, match=( - "max_query_payment must be int, float, Decimal, or Hbar, " - f"got {type(invalid_amount).__name__}" - )): + with pytest.raises( + TypeError, + match=( + "max_query_payment must be int, float, Decimal, or Hbar, " + f"got {type(invalid_amount).__name__}" + ), + ): query.set_max_query_payment(invalid_amount) -@pytest.mark.parametrize( - 'invalid_amount', - [float('inf'), float('nan')] -) + +@pytest.mark.parametrize("invalid_amount", [float("inf"), float("nan")]) def test_set_max_query_payment_non_finite_value(query, invalid_amount): """Test that set_max_query_payment raise error for non finite value.""" with pytest.raises(ValueError, match="Hbar amount must be finite"): query.set_max_query_payment(invalid_amount) -def test_set_max_payment_override_client_max_payment(query_requires_payment, mock_client): + +def test_set_max_payment_override_client_max_payment( + query_requires_payment, mock_client +): """ Test that a query can override the Client's default_max_query_payment """ @@ -320,12 +371,15 @@ def test_set_max_payment_override_client_max_payment(query_requires_payment, moc assert query_requires_payment.payment_amount == Hbar(2) -def test_set_max_payment_override_client_max_payment_and_error(query_requires_payment, mock_client): + +def 
test_set_max_payment_override_client_max_payment_and_error( + query_requires_payment, mock_client +): """ Test that a query can override the Client's default_max_query_payment and that execution fails if the query cost exceeds the query-specific max. """ - # Check Client's default max_query_payment + # Check Client's default max_query_payment assert mock_client.default_max_query_payment == Hbar(1) # Update Client's default max_query_payment to 2 Hbar @@ -339,7 +393,7 @@ def test_set_max_payment_override_client_max_payment_and_error(query_requires_pa # Mock get_cost to return 2 Hbar, exceeding the query.max_query_payment mock_get_cost = MagicMock(return_value=Hbar(2)) query_requires_payment.get_cost = mock_get_cost - + # Execution should raise ValueError because 2 > query.max_query_payment (1) expected_msg = "Query cost ℏ2.0 HBAR exceeds max set query payment: ℏ1.0 HBAR" with pytest.raises(ValueError, match=re.escape(expected_msg)): @@ -367,7 +421,10 @@ def test_payment_query_use_client_max_payment(query_requires_payment, mock_clien assert query_requires_payment.payment_amount == Hbar(2) -def test_payment_query_use_client_max_payment_and_error(query_requires_payment, mock_client): + +def test_payment_query_use_client_max_payment_and_error( + query_requires_payment, mock_client +): """ Test that execution fails if cost > client default max when query doesn't override. 
""" @@ -382,4 +439,3 @@ def test_payment_query_use_client_max_payment_and_error(query_requires_payment, expected_msg = "Query cost ℏ2.0 HBAR exceeds max set query payment: ℏ1.0 HBAR" with pytest.raises(ValueError, match=re.escape(expected_msg)): query_requires_payment._before_execute(mock_client) - diff --git a/tests/unit/staking_info_test.py b/tests/unit/staking_info_test.py new file mode 100644 index 000000000..02506d014 --- /dev/null +++ b/tests/unit/staking_info_test.py @@ -0,0 +1,302 @@ +"""Tests for the StakingInfo class.""" + +import pytest +from dataclasses import FrozenInstanceError + +from hiero_sdk_python.account.account_id import AccountId +from hiero_sdk_python.hapi.services.basic_types_pb2 import ( + StakingInfo as StakingInfoProto, +) +from hiero_sdk_python.hbar import Hbar +from hiero_sdk_python.staking_info import StakingInfo +from hiero_sdk_python.timestamp import Timestamp + +pytestmark = pytest.mark.unit + + +@pytest.fixture(name="staking_info_with_account") +def fixture_staking_info_with_account(): + """Return a StakingInfo instance staked to an account.""" + return StakingInfo( + decline_reward=True, + stake_period_start=Timestamp(100, 200), + pending_reward=Hbar.from_tinybars(1234), + staked_to_me=Hbar.from_tinybars(5678), + staked_account_id=AccountId(0, 0, 123), + ) + + +@pytest.fixture(name="staking_info_with_node") +def fixture_staking_info_with_node(): + """Return a StakingInfo instance staked to a node.""" + return StakingInfo( + decline_reward=False, + stake_period_start=Timestamp(300, 400), + pending_reward=Hbar.from_tinybars(2222), + staked_to_me=Hbar.from_tinybars(4444), + staked_node_id=3, + ) + + +@pytest.fixture(name="proto_staking_info_with_account") +def fixture_proto_staking_info_with_account(): + """Return a StakingInfo protobuf staked to an account.""" + return StakingInfoProto( + decline_reward=True, + stake_period_start=Timestamp(100, 200)._to_protobuf(), + pending_reward=1234, + staked_to_me=5678, + 
staked_account_id=AccountId(0, 0, 123)._to_proto(), + ) + + +@pytest.fixture(name="proto_staking_info_with_node") +def fixture_proto_staking_info_with_node(): + """Return a StakingInfo protobuf staked to a node.""" + return StakingInfoProto( + decline_reward=False, + stake_period_start=Timestamp(300, 400)._to_protobuf(), + pending_reward=2222, + staked_to_me=4444, + staked_node_id=3, + ) + + +def test_default_initialization(): + """Verify defaults for an empty StakingInfo.""" + staking_info = StakingInfo() + + assert staking_info.decline_reward is None + assert staking_info.stake_period_start is None + assert staking_info.pending_reward is None + assert staking_info.staked_to_me is None + assert staking_info.staked_account_id is None + assert staking_info.staked_node_id is None + + +def test_frozen_instance_is_immutable(): + """Ensure dataclass is frozen and rejects mutation.""" + staking_info = StakingInfo() + with pytest.raises(FrozenInstanceError): + staking_info.decline_reward = True + + +def test_initialization_with_account(staking_info_with_account): + """Validate field values when staked to an account.""" + staking_info = staking_info_with_account + + assert staking_info.decline_reward is True + assert staking_info.stake_period_start == Timestamp(100, 200) + assert staking_info.pending_reward == Hbar.from_tinybars(1234) + assert staking_info.staked_to_me == Hbar.from_tinybars(5678) + assert str(staking_info.staked_account_id) == "0.0.123" + assert staking_info.staked_node_id is None + + +def test_initialization_with_node(staking_info_with_node): + """Validate field values when staked to a node.""" + staking_info = staking_info_with_node + + assert staking_info.decline_reward is False + assert staking_info.stake_period_start == Timestamp(300, 400) + assert staking_info.pending_reward == Hbar.from_tinybars(2222) + assert staking_info.staked_to_me == Hbar.from_tinybars(4444) + assert staking_info.staked_account_id is None + assert staking_info.staked_node_id == 
3 + + +def test_oneof_validation_raises(): + """Reject setting both staked_account_id and staked_node_id.""" + with pytest.raises( + ValueError, + match=r"Only one of staked_account_id or staked_node_id can be set\.", + ): + StakingInfo( + staked_account_id=AccountId(0, 0, 123), + staked_node_id=3, + ) + + +def test_from_proto_with_account(proto_staking_info_with_account): + """Build StakingInfo from a proto with an account target.""" + staking_info = StakingInfo._from_proto(proto_staking_info_with_account) + + assert staking_info.decline_reward is True + assert staking_info.stake_period_start == Timestamp(100, 200) + assert staking_info.pending_reward == Hbar.from_tinybars(1234) + assert staking_info.staked_to_me == Hbar.from_tinybars(5678) + assert str(staking_info.staked_account_id) == "0.0.123" + assert staking_info.staked_node_id is None + + +def test_from_proto_with_node(proto_staking_info_with_node): + """Build StakingInfo from a proto with a node target.""" + staking_info = StakingInfo._from_proto(proto_staking_info_with_node) + + assert staking_info.decline_reward is False + assert staking_info.stake_period_start == Timestamp(300, 400) + assert staking_info.pending_reward == Hbar.from_tinybars(2222) + assert staking_info.staked_to_me == Hbar.from_tinybars(4444) + assert staking_info.staked_account_id is None + assert staking_info.staked_node_id == 3 + + +def test_from_proto_none_raises(): + """Reject None inputs when building from proto.""" + with pytest.raises(ValueError, match=r"Staking info proto is None"): + StakingInfo._from_proto(None) + + +def test_to_proto_with_account(staking_info_with_account): + """Serialize to proto when staked to an account.""" + proto = staking_info_with_account._to_proto() + + assert proto.decline_reward is True + assert proto.HasField("stake_period_start") + assert proto.stake_period_start == Timestamp(100, 200)._to_protobuf() + assert proto.pending_reward == 1234 + assert proto.staked_to_me == 5678 + assert 
proto.HasField("staked_account_id") + assert proto.staked_account_id == AccountId(0, 0, 123)._to_proto() + assert not proto.HasField("staked_node_id") + + +def test_to_proto_with_node(staking_info_with_node): + """Serialize to proto when staked to a node.""" + proto = staking_info_with_node._to_proto() + + assert proto.decline_reward is False + assert proto.HasField("stake_period_start") + assert proto.stake_period_start == Timestamp(300, 400)._to_protobuf() + assert proto.pending_reward == 2222 + assert proto.staked_to_me == 4444 + assert not proto.HasField("staked_account_id") + assert proto.HasField("staked_node_id") + assert proto.staked_node_id == 3 + + +def test_proto_round_trip_with_account(staking_info_with_account): + """Round-trip proto serialization with an account target.""" + restored = StakingInfo._from_proto(staking_info_with_account._to_proto()) + + assert restored.decline_reward == staking_info_with_account.decline_reward + assert restored.stake_period_start == staking_info_with_account.stake_period_start + assert restored.pending_reward == staking_info_with_account.pending_reward + assert restored.staked_to_me == staking_info_with_account.staked_to_me + assert str(restored.staked_account_id) == str( + staking_info_with_account.staked_account_id + ) + assert restored.staked_node_id is None + + +def test_proto_round_trip_with_node(staking_info_with_node): + """Round-trip proto serialization with a node target.""" + restored = StakingInfo._from_proto(staking_info_with_node._to_proto()) + + assert restored.decline_reward == staking_info_with_node.decline_reward + assert restored.stake_period_start == staking_info_with_node.stake_period_start + assert restored.pending_reward == staking_info_with_node.pending_reward + assert restored.staked_to_me == staking_info_with_node.staked_to_me + assert restored.staked_account_id is None + assert restored.staked_node_id == staking_info_with_node.staked_node_id + + +def 
test_from_bytes_deserializes(staking_info_with_account): + """Deserialize from bytes into an equivalent StakingInfo.""" + data = staking_info_with_account.to_bytes() + restored = StakingInfo.from_bytes(data) + + assert restored.decline_reward == staking_info_with_account.decline_reward + assert restored.stake_period_start == staking_info_with_account.stake_period_start + assert restored.pending_reward == staking_info_with_account.pending_reward + assert restored.staked_to_me == staking_info_with_account.staked_to_me + assert str(restored.staked_account_id) == str( + staking_info_with_account.staked_account_id + ) + assert restored.staked_node_id is None + + +def test_from_bytes_empty_raises(): + """Reject empty byte payloads.""" + with pytest.raises(ValueError, match=r"data cannot be empty"): + StakingInfo.from_bytes(b"") + + +def test_from_bytes_with_string_raises(): + """Reject non-bytes payloads of type str.""" + with pytest.raises(TypeError, match=r"data must be bytes"): + StakingInfo.from_bytes("Hi from Anto :D") + + +def test_from_bytes_with_int_raises(): + """Reject non-bytes payloads of type int.""" + with pytest.raises(TypeError, match=r"data must be bytes"): + StakingInfo.from_bytes(123) + + +def test_from_bytes_invalid_bytes_raises(): + """Reject malformed byte payloads.""" + with pytest.raises(ValueError, match=r"Failed to parse StakingInfo bytes"): + StakingInfo.from_bytes(b"\xff\xff\xff") + + +def test_to_bytes_produces_non_empty_bytes(staking_info_with_node): + """Ensure serialization yields a non-empty bytes payload.""" + data = staking_info_with_node.to_bytes() + + assert isinstance(data, bytes) + assert len(data) > 0 + + +def test_bytes_round_trip_with_node(staking_info_with_node): + """Round-trip byte serialization with a node target.""" + data = staking_info_with_node.to_bytes() + restored = StakingInfo.from_bytes(data) + + assert restored.decline_reward == staking_info_with_node.decline_reward + assert restored.stake_period_start == 
staking_info_with_node.stake_period_start + assert restored.pending_reward == staking_info_with_node.pending_reward + assert restored.staked_to_me == staking_info_with_node.staked_to_me + assert restored.staked_account_id is None + assert restored.staked_node_id == staking_info_with_node.staked_node_id + + +def test_str_output_format(staking_info_with_account): + """Check human-readable string formatting.""" + expected = ( + "StakingInfo(\n" + " decline_reward=True,\n" + " stake_period_start=100.000000200,\n" + " pending_reward=0.00001234 \u210f,\n" + " staked_to_me=0.00005678 \u210f,\n" + " staked_account_id=0.0.123,\n" + " staked_node_id=None\n" + ")" + ) + + assert str(staking_info_with_account) == expected + + +def test_repr_contains_class_name_and_fields(staking_info_with_node): + """Ensure repr includes key fields for debugging.""" + rep = repr(staking_info_with_node) + + assert "StakingInfo(" in rep + assert "decline_reward=False" in rep + assert "stake_period_start=" in rep + assert "pending_reward=Hbar(0.00002222)" in rep + assert "staked_to_me=Hbar(0.00004444)" in rep + assert "staked_node_id=3" in rep + + +def test_proto_round_trip_default(): + """Round-trip proto serialization for default values.""" + default_info = StakingInfo() + restored = StakingInfo._from_proto(default_info._to_proto()) + + assert restored.decline_reward is False # proto3 scalar default + assert restored.stake_period_start is None + assert restored.pending_reward == Hbar.from_tinybars(0) # proto3 scalar default + assert restored.staked_to_me == Hbar.from_tinybars(0) # proto3 scalar default + assert restored.staked_account_id is None + assert restored.staked_node_id is None diff --git a/tests/unit/token_create_transaction_test.py b/tests/unit/token_create_transaction_test.py index 45935b38c..055ea600f 100644 --- a/tests/unit/token_create_transaction_test.py +++ b/tests/unit/token_create_transaction_test.py @@ -562,8 +562,8 @@ def test_transaction_execution_failure(mock_account_ids): 
# Attempt to execute - this should raise the mocked PrecheckError token_tx.execute(token_tx.client) - # Verify _execute was called with client - mock_execute.assert_called_once_with(token_tx.client) + # Verify _execute was called with client and timeout as None + mock_execute.assert_called_once_with(token_tx.client, None) # This test uses fixture (mock_account_ids, mock_client) as parameter def test_overwrite_defaults(mock_account_ids, mock_client): diff --git a/tests/unit/topic_id_test.py b/tests/unit/topic_id_test.py index fb26c9ec0..7f453a364 100644 --- a/tests/unit/topic_id_test.py +++ b/tests/unit/topic_id_test.py @@ -123,3 +123,8 @@ def test_validate_checksum_failure(client): with pytest.raises(ValueError): topic_id.validate_checksum(client) + +def test_topic_id_repr(): + """Test that __repr__ returns the expected format.""" + topic_id = TopicId(0, 0, 42) + assert repr(topic_id) == "TopicId(shard=0, realm=0, num=42)" diff --git a/tests/unit/topic_message_submit_transaction_test.py b/tests/unit/topic_message_submit_transaction_test.py index 1d8e643a2..54705555f 100644 --- a/tests/unit/topic_message_submit_transaction_test.py +++ b/tests/unit/topic_message_submit_transaction_test.py @@ -221,11 +221,11 @@ def test_execute_topic_message_submit_transaction(topic_id, message): # This test uses fixture topic_id as parameter def test_topic_message_submit_transaction_with_large_message(topic_id): - """Test sending a large message (close to the maximum allowed size).""" + """Test sending a large message (multi-chunk, same node).""" # Create a large message (just under the typical 4KB limit) large_message = "A" * 4000 - - # Create success responses + + # Create a single node response sequence for all chunks tx_response = transaction_response_pb2.TransactionResponse( nodeTransactionPrecheckCode=ResponseCode.OK ) @@ -240,27 +240,23 @@ def test_topic_message_submit_transaction_with_large_message(topic_id): ) ) ) - - response_sequences = [ - [tx_response, receipt_response], 
# chunk 1 - [tx_response, receipt_response], # chunk 2 - [tx_response, receipt_response], # chunk 3 - [tx_response, receipt_response], # chunk 4 - ] - - with mock_hedera_servers(response_sequences) as client: + # For simplicity, assume 4 chunks are required + # All chunks go to the same node, so repeat the same responses for that node + response_sequence = [tx_response, receipt_response] * 4 # 4 chunks + + with mock_hedera_servers([response_sequence]) as client: tx = ( TopicMessageSubmitTransaction() .set_topic_id(topic_id) .set_message(large_message) .freeze_with(client) ) - + try: receipt = tx.execute(client) except Exception as e: pytest.fail(f"Should not raise exception, but raised: {e}") - + # Verify the receipt contains the expected values assert receipt.status == ResponseCode.SUCCESS diff --git a/tests/unit/transaction_freeze_and_bytes_test.py b/tests/unit/transaction_freeze_and_bytes_test.py index 1e3cd272f..e629626e5 100644 --- a/tests/unit/transaction_freeze_and_bytes_test.py +++ b/tests/unit/transaction_freeze_and_bytes_test.py @@ -12,6 +12,11 @@ from hiero_sdk_python.transaction.transfer_transaction import TransferTransaction from hiero_sdk_python.transaction.transaction_id import TransactionId +from hiero_sdk_python.hapi.services.transaction_response_pb2 import ( + TransactionResponse as TransactionResponseProto, +) + + pytestmark = pytest.mark.unit @@ -611,3 +616,92 @@ def test_unsigned_transaction_can_be_signed_after_to_bytes(): assert unsigned_bytes != signed_bytes assert isinstance(signed_bytes, bytes) + +def test_transaction_freeze_with_node_ids(mock_client): + """ + Test freeze_with() correctly initializes transaction bytes using provided node_account_id(s). 
+ """ + # Case 1 Single node_account_id + single_node_id = AccountId(0,0,3) + tx = TransferTransaction() + tx.node_account_id = single_node_id + + tx.freeze_with(mock_client) + + assert tx.node_account_ids == [single_node_id] + # Verify creates transaction_bytes for single node_id + assert len(tx._transaction_body_bytes) == 1 + assert set(tx._transaction_body_bytes.keys()) == {single_node_id} + + # Case 2 node_account_id list + node_account_ids = [AccountId(0,0,3), AccountId(0,0,4)] + tx = TransferTransaction() + tx.node_account_ids = node_account_ids + + tx.freeze_with(mock_client) + + assert tx.node_account_ids == node_account_ids + # Verify creates transaction_bytes for two node_ids + assert len(tx._transaction_body_bytes) == 2 + assert set(tx._transaction_body_bytes.keys()) == set(node_account_ids) + +def test_transaction_freeze_with_node_ids_without_client(): + """ + Test freeze() correctly initializes transaction bytes using provided node_account_id(s). + """ + operator_id = AccountId.from_string("0.0.1234") + + # Case 1 Single node_account_id + single_node_id = AccountId(0,0,3) + tx = TransferTransaction() + tx.set_transaction_id(TransactionId.generate(operator_id)) + tx.node_account_id = single_node_id + + tx.freeze() + + assert tx.node_account_ids == [single_node_id] + # Verify creates transaction_bytes for single node_id + assert len(tx._transaction_body_bytes) == 1 + assert set(tx._transaction_body_bytes.keys()) == {single_node_id} + + # Case 2 node_account_id list + node_account_ids = [AccountId(0,0,3), AccountId(0,0,4)] + tx = TransferTransaction() + tx.set_transaction_id(TransactionId.generate(operator_id)) + tx.node_account_ids = node_account_ids + + tx.freeze() + + assert tx.node_account_ids == node_account_ids + # Verify creates transaction_bytes for two node_ids + assert len(tx._transaction_body_bytes) == 2 + assert set(tx._transaction_body_bytes.keys()) == set(node_account_ids) + +def test_transaction_freeze_without_node_ids(mock_client): + """ + 
Test freeze_with() initializes transaction bytes using clients network node id. + """ + tx = TransferTransaction() + tx.freeze_with(mock_client) + + assert tx.node_account_ids == [] + # Verify creates transaction_bytes for client network nodes + assert len(tx._transaction_body_bytes) == len(mock_client.network.nodes) + assert set(tx._transaction_body_bytes.keys()) == set(node._account_id for node in mock_client.network.nodes) + +def test_map_response_raises_if_proto_request_is_not_transaction(): + """ + Test _map_response raises ValueError when provided proto is not a transaction_pb2.Transaction. + """ + tx = TransferTransaction() + + mock_response = TransactionResponseProto() + mock_node_id = None + invalid_proto_request = object() + + with pytest.raises(TypeError, match="Expected Transaction but got"): + tx._map_response( + response=mock_response, + node_id=mock_node_id, + proto_request=invalid_proto_request, + ) From 826d68477313da825a4dac1b7452438b49b539bf Mon Sep 17 00:00:00 2001 From: SubhraSameerDash <2303105_cseai@gita.edu.in> Date: Sun, 8 Feb 2026 08:53:47 +0530 Subject: [PATCH 2/3] feat: add FileId repr Signed-off-by: SubhraSameerDash <2303105_cseai@gita.edu.in> --- CHANGELOG.md | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b58249ab9..c13ed940f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,8 +6,8 @@ This changelog is based on [Keep a Changelog](https://keepachangelog.com/en/1.1. ## [Unreleased] - ### Tests + - Standardize formatting of `tests/unit/entity_id_helper_test.py` using Black for consistent code style across the test suite (#1527) - Added tests for ProtoBuf Training Example Implementation @@ -29,19 +29,13 @@ This changelog is based on [Keep a Changelog](https://keepachangelog.com/en/1.1. - Format tests/unit/network_tls_test.py with black for code style consistency (#1543) - Formatted `ethereum_transaction_test.py` using Black. - Formatted client_test.py using Black. 
-- Format tests/unit/query\*.py using black (#1547) -- Format `tests/unit/custom_fee_test.py` with black for code style consistency. (#1525) ### Added - Add constructor-style `__repr__` for `FileId` to improve debugging output (#1628) -- Format tests/unit/query*.py using black (#1547) -- Format `tests/unit/custom_fee_test.py` with black for code style consistency. (#1525) - -### Added - Added logging in bot-gfi-assign-on-comment.js to prevent silent skips. (`#1668`) - Added `AssessedCustomFee` domain model to represent assessed custom fees. (`#1637`) -- Add __repr__ method for ContractId class to improve debugging (#1714) +- Add `__repr__` method for ContractId class to improve debugging (#1714) - Added Protobuf Training guide to enhance developer understanding of proto serialization and deserialization (#1645) - Add `__repr__()` method to `TopicId` class for improved debugging with constructor-style representation (#1629) @@ -157,8 +151,6 @@ This changelog is based on [Keep a Changelog](https://keepachangelog.com/en/1.1. ### Documentation - Type hints to exception classes (`PrecheckError`, `MaxAttemptsError`, `ReceiptStatusError`) constructors and string methods. - -### Documentation - Added comprehensive docstring to `compress_with_cryptography` function (#1626) - Replaced the docstring in `entity_id_helper.py` with one that is correct. (#1623) @@ -429,6 +421,7 @@ This changelog is based on [Keep a Changelog](https://keepachangelog.com/en/1.1. - docs: Add `docs/sdk_developers/project_structure.md` to explain repository layout and import paths. 
### Changed + - chore: renamed examples to match src where possible - Moved examples/ to be inside subfiles to match src structure - changed example script workflow to run on new subdirectory structure @@ -872,5 +865,4 @@ contract_call_local_pb2.ContractLoginfo -> contract_types_pb2.ContractLoginfo - N/A - # [0.1.0] - 2025-02-19 From 47db9d6a8bc1c2dd6d108ed2d07e61e0e3161852 Mon Sep 17 00:00:00 2001 From: SubhraSameerDash <2303105_cseai@gita.edu.in> Date: Sun, 8 Feb 2026 09:34:58 +0530 Subject: [PATCH 3/3] fix: resolve merge conflicts in bot-next-issue-recommendation.js Signed-off-by: SubhraSameerDash <2303105_cseai@gita.edu.in> --- .../scripts/bot-next-issue-recommendation.js | 157 +++++++----------- 1 file changed, 60 insertions(+), 97 deletions(-) diff --git a/.github/scripts/bot-next-issue-recommendation.js b/.github/scripts/bot-next-issue-recommendation.js index fd8f37ff2..dba56fb5e 100644 --- a/.github/scripts/bot-next-issue-recommendation.js +++ b/.github/scripts/bot-next-issue-recommendation.js @@ -1,10 +1,3 @@ -module.exports = async ({ github, context, core }) => { - const { payload } = context; - - // Get PR information from automatic pull_request_target trigger - let prNumber = payload.pull_request?.number; - let prBody = payload.pull_request?.body || ''; - const SUPPORTED_GFI_REPOS = [ 'hiero-sdk-cpp', 'hiero-sdk-swift', @@ -23,15 +16,11 @@ module.exports = async ({ github, context, core }) => { // Only automatic triggers from merged PRs will work const repoOwner = context.repo.owner; const repoName = context.repo.repo; - if (!prNumber) { core.info('No PR number found, skipping'); return; } - - core.info(`Processing PR #${prNumber}`); - core.info(`Processing PR #${prNumber}`); @@ -43,20 +32,14 @@ module.exports = async ({ github, context, core }) => { } const issueRegex = /(fixes|closes|resolves|fix|close|resolve)\s+(?:[\w-]+\/[\w-]+)?#(\d+)/gi; const matches = [...prBody.matchAll(issueRegex)]; - if (matches.length === 0) { core.info('No linked issues 
found in PR body'); return; } - - // Get the first linked issue number - const issueNumber = parseInt(matches[0][2]); - core.info(`Found linked issue #${issueNumber}`); - // Get the first linked issue number - const issueNumber = parseInt(matches[0][2]); + const issueNumber = parseInt(matches[0][2], 10); core.info(`Found linked issue #${issueNumber}`); try { @@ -66,13 +49,11 @@ module.exports = async ({ github, context, core }) => { repo: repoName, issue_number: issueNumber, }); - // Normalize and check issue labels (case-insensitive) const labelNames = issue.labels.map(label => label.name.toLowerCase()); const labelSet = new Set(labelNames); core.info(`Issue labels: ${labelNames.join(', ')}`); - // Determine issue difficulty level const difficultyLevels = { @@ -81,30 +62,23 @@ module.exports = async ({ github, context, core }) => { intermediate: labelSet.has('intermediate'), advanced: labelSet.has('advanced'), }; - // Skip if intermediate or advanced if (difficultyLevels.intermediate || difficultyLevels.advanced) { core.info('Issue is intermediate or advanced level, skipping recommendation'); return; } - // Only proceed for Good First Issue or beginner issues if (!difficultyLevels.goodFirstIssue && !difficultyLevels.beginner) { core.info('Issue is not a Good First Issue or beginner issue, skipping'); return; } - - let recommendedIssues = []; - let recommendedLabel = null; - let isFallback = false; - let recommendationScope = 'repo'; - let recommendedIssues = []; let recommendedLabel = null; let isFallback = false; + let recommendationScope = 'repo'; recommendedIssues = await searchIssues(github, core, repoOwner, repoName, 'beginner'); recommendedLabel = 'Beginner'; @@ -118,16 +92,9 @@ module.exports = async ({ github, context, core }) => { if (recommendedIssues.length === 0) { recommendationScope = 'org'; recommendedLabel = 'Good First Issue'; - recommendedIssues = await github.rest.search.issuesAndPullRequests({ - q: `org:hiero-ledger type:issue state:open 
label:"good first issue" no:assignee`, - per_page: 6, - }).then(res => res.data.items); + recommendedIssues = await searchOrgIssues(github, core, repoOwner, 'good first issue'); } - // Remove the issue they just solved - recommendedIssues = recommendedIssues.filter(i => i.number !== issueNumber); - - // Remove the issue they just solved recommendedIssues = recommendedIssues.filter(i => i.number !== issueNumber); @@ -141,10 +108,6 @@ module.exports = async ({ github, context, core }) => { recommendationScope, }; await generateAndPostComment(github, context, core, prNumber, recommendedIssues, recommendationMeta); - - }; - await generateAndPostComment(github, context, core, prNumber, recommendedIssues, recommendationMeta); - } catch (error) { core.setFailed(`Error processing issue #${issueNumber}: ${error.message}`); } @@ -154,13 +117,11 @@ async function searchIssues(github, core, owner, repo, label) { try { const query = `repo:${owner}/${repo} type:issue state:open label:"${label}" no:assignee`; core.info(`Searching for issues with query: ${query}`); - const { data: searchResult } = await github.rest.search.issuesAndPullRequests({ q: query, per_page: 6, }); - core.info(`Found ${searchResult.items.length} issues with label "${label}"`); return searchResult.items; @@ -170,18 +131,32 @@ async function searchIssues(github, core, owner, repo, label) { } } -async function generateAndPostComment(github, context, core, prNumber, recommendedIssues, { completedLabelText, recommendedLabel, isFallback, recommendationScope }) { - const marker = ''; - - // Build comment content - let comment = `${marker}\n\n🎉 **Nice work completing a ${completedLabelText}!**\n\n`; - comment += `Thank you for your contribution to the Hiero Python SDK! 
We're excited to have you as part of our community.\n\n`; - - if (recommendedIssues.length > 0) { - if (recommendationScope === 'org') { - comment += `Here are some **Good First Issues across the Hiero organization** you might be interested in working on next:\n\n`; - } else if (isFallback) { -async function generateAndPostComment(github, context, core, prNumber, recommendedIssues, { completedLabelText, recommendedLabel, isFallback}) { +async function searchOrgIssues(github, core, owner, label) { + try { + const query = `org:${owner} type:issue state:open label:"${label}" no:assignee`; + core.info(`Searching org issues with query: ${query}`); + + const { data: searchResult } = await github.rest.search.issuesAndPullRequests({ + q: query, + per_page: 6, + }); + + core.info(`Found ${searchResult.items.length} org issues with label "${label}"`); + return searchResult.items; + } catch (error) { + core.warning(`Error searching org issues with label "${label}": ${error.message}`); + return []; + } +} + +async function generateAndPostComment( + github, + context, + core, + prNumber, + recommendedIssues, + { completedLabelText, recommendedLabel, isFallback, recommendationScope } +) { const marker = ''; // Build comment content @@ -189,13 +164,13 @@ async function generateAndPostComment(github, context, core, prNumber, recommend comment += `Thank you for your contribution to the Hiero Python SDK! 
We're excited to have you as part of our community.\n\n`; if (recommendedIssues.length > 0) { - - if (isFallback) { + if (recommendationScope === 'org') { + comment += 'Here are some **Good First Issues across the Hiero organization** you might be interested in working on next:\n\n'; + } else if (isFallback) { comment += `Here are some **${recommendedLabel}** issues at a similar level you might be interested in working on next:\n\n`; } else { comment += `Here are some issues labeled **${recommendedLabel}** you might be interested in working on next:\n\n`; } - // Sanitize title: escape markdown link syntax and special characters const sanitizeTitle = (title) => title @@ -216,50 +191,43 @@ async function generateAndPostComment(github, context, core, prNumber, recommend const description = sanitized.substring(0, 150); comment += ` ${description}${sanitized.length > 150 ? '...' : ''}\n\n`; } else { - comment += ` *No description available*\n\n`; + comment += ' *No description available*\n\n'; } }); } else { comment += `There are currently no open issues available at or near the ${completedLabelText} level in this repository.\n\n`; - const orgLabel = recommendedLabel === 'Beginner' ? 'beginner' : 'good first issue'; - const orgLabelQuery = encodeURIComponent(`label:"${orgLabel}"`); - comment += `You can check out ${recommendedLabel.toLowerCase()} issues across the entire Hiero organization: ` + - `[Hiero ${recommendedLabel} Issues](https://github.com/issues?q=org%3Ahiero-ledger+type%3Aissue+state%3Aopen+${orgLabelQuery})\n\n`; - } - - comment += `🌟 **Stay connected with the project:**\n`; - comment += `- ⭐ [Star this repository](https://github.com/${context.repo.owner}/${context.repo.repo}) to show your support\n`; - comment += `- 👀 [Watch this repository](https://github.com/${context.repo.owner}/${context.repo.repo}/watchers) to get notified of new issues and releases\n\n`; - - comment += `We look forward to seeing more contributions from you! 
If you have any questions, feel free to ask in our [Discord community](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md).\n\n`; - comment += `From the Hiero Python SDK Team 🚀`; - - comment += `You can check out **Good First Issues** in other Hiero repositories:\n\n`; - const repoQuery = SUPPORTED_GFI_REPOS - .map(repo => `repo:${context.repo.owner}/${repo}`) - .join(' OR '); - - const gfiSearchQuery = [ - 'is:open', - 'is:issue', - `org:${context.repo.owner}`, - 'archived:false', - 'no:assignee', - '(label:"good first issue" OR label:"skill: good first issue")', - `(${repoQuery})`, - ].join(' '); - - const gfiQuery = `https://github.com/issues?q=${encodeURIComponent(gfiSearchQuery)}`; - - comment += `[View Good First Issues across supported Hiero repositories](${gfiQuery})\n\n`; + if (recommendationScope === 'org') { + const orgLabel = recommendedLabel === 'Beginner' ? 'beginner' : 'good first issue'; + const orgQuery = `org:${context.repo.owner} type:issue state:open label:"${orgLabel}"`; + comment += `You can check out ${recommendedLabel.toLowerCase()} issues across the entire Hiero organization: ` + + `[Hiero ${recommendedLabel} Issues](https://github.com/issues?q=${encodeURIComponent(orgQuery)})\n\n`; + } else { + comment += 'You can check out **Good First Issues** in other Hiero repositories:\n\n'; + const repoQuery = SUPPORTED_GFI_REPOS + .map(repo => `repo:${context.repo.owner}/${repo}`) + .join(' OR '); + + const gfiSearchQuery = [ + 'is:open', + 'is:issue', + `org:${context.repo.owner}`, + 'archived:false', + 'no:assignee', + '(label:"good first issue" OR label:"skill: good first issue")', + `(${repoQuery})`, + ].join(' '); + + const gfiQuery = `https://github.com/issues?q=${encodeURIComponent(gfiSearchQuery)}`; + comment += `[View Good First Issues across supported Hiero repositories](${gfiQuery})\n\n`; + } } - comment += `🌟 **Stay connected with the project:**\n`; + comment += '🌟 **Stay connected with the project:**\n'; comment 
+= `- ⭐ [Star this repository](https://github.com/${context.repo.owner}/${context.repo.repo}) to show your support\n`; comment += `- 👀 [Watch this repository](https://github.com/${context.repo.owner}/${context.repo.repo}/watchers) to get notified of new issues and releases\n\n`; - comment += `We look forward to seeing more contributions from you! If you have any questions, feel free to ask in our [Discord community](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md).\n\n`; - comment += `From the Hiero Python SDK Team 🚀`; + comment += 'We look forward to seeing more contributions from you! If you have any questions, feel free to ask in our [Discord community](https://github.com/hiero-ledger/hiero-sdk-python/blob/main/docs/discord.md).\n\n'; + comment += 'From the Hiero Python SDK Team 🚀'; // Check for existing comment try { @@ -268,9 +236,6 @@ async function generateAndPostComment(github, context, core, prNumber, recommend repo: context.repo.repo, issue_number: prNumber, }); - - const existingComment = comments.find(comment => comment.body.includes(marker)); - const existingComment = comments.find(c => c.body.includes(marker)); @@ -281,7 +246,6 @@ async function generateAndPostComment(github, context, core, prNumber, recommend } catch (error) { core.warning(`Error checking existing comments: ${error.message}`); } - // Post the comment try { @@ -291,7 +255,6 @@ async function generateAndPostComment(github, context, core, prNumber, recommend issue_number: prNumber, body: comment, }); - core.info(`Successfully posted comment to PR #${prNumber}`); } catch (error) {