diff --git a/.github/workflows/deploy-custom-prod.yml b/.github/workflows/deploy-custom-prod.yml index c4dbe59ec..949b6727e 100644 --- a/.github/workflows/deploy-custom-prod.yml +++ b/.github/workflows/deploy-custom-prod.yml @@ -24,7 +24,9 @@ jobs: cost_profile: DEFAULT job_files: >- job_spec/ITS_LIVE_AUTORIFT.yml - job_spec/ITS_LIVE_META.yml + job_spec/ITS_LIVE_CROP.yml + job_spec/ITS_LIVE_CROP_BULK.yml + job_spec/ITS_LIVE_META_BULK.yml instance_types: r7gd.2xlarge,r7gd.4xlarge,r7gd.8xlarge default_max_vcpus: 2000 # Max: 10,406 expanded_max_vcpus: 2000 # Max: 10,406 @@ -310,7 +312,7 @@ jobs: url: https://${{ matrix.domain }} steps: - - uses: actions/checkout@v6.0.0 + - uses: actions/checkout@v6.0.2 with: fetch-depth: 0 diff --git a/.github/workflows/deploy-custom-test.yml b/.github/workflows/deploy-custom-test.yml index 856c11edc..2b71cc099 100644 --- a/.github/workflows/deploy-custom-test.yml +++ b/.github/workflows/deploy-custom-test.yml @@ -65,7 +65,9 @@ jobs: cost_profile: DEFAULT job_files: >- job_spec/ITS_LIVE_AUTORIFT.yml - job_spec/ITS_LIVE_META.yml + job_spec/ITS_LIVE_CROP.yml + job_spec/ITS_LIVE_CROP_BULK.yml + job_spec/ITS_LIVE_META_BULK.yml instance_types: r7gd.2xlarge,r7gd.4xlarge,r7gd.8xlarge default_max_vcpus: 640 # Max: 10,406 expanded_max_vcpus: 640 # Max: 10,406 @@ -127,13 +129,31 @@ jobs: required_surplus: 0 security_environment: ASF ami_id: /aws/service/ecs/optimized-ami/amazon-linux-2023/recommended/image_id + + - environment: hyp3-slimsar-test + domain: hyp3-slimsar-test.asf.alaska.edu + template_bucket: cf-templates-1ce4dbsaugtx1-us-west-2 + image_tag: test + product_lifetime_in_days: 14 + default_credits_per_user: 0 + default_application_status: APPROVED + cost_profile: DEFAULT + job_files: >- + job_spec/INSAR_ISCE_BURST.yml + job_spec/SLIMSAR_TDBP.yml + instance_types: r6id.xlarge,r6id.2xlarge,r6id.4xlarge,r6id.8xlarge,r6idn.xlarge,r6idn.2xlarge,r6idn.4xlarge,r6idn.8xlarge + default_max_vcpus: 640 + expanded_max_vcpus: 640 + required_surplus: 0 + security_environment: ASF + ami_id: /aws/service/ecs/optimized-ami/amazon-linux-2023/recommended/image_id environment: name: ${{ matrix.environment }} url: https://${{ matrix.domain }} steps: - - uses: actions/checkout@v6.0.0 + - uses: actions/checkout@v6.0.2 with: fetch-depth: 0 diff --git a/.github/workflows/deploy-daac-prod.yml b/.github/workflows/deploy-daac-prod.yml index c1dda339a..4917a50e9 100644 --- a/.github/workflows/deploy-daac-prod.yml +++ b/.github/workflows/deploy-daac-prod.yml @@ -42,7 +42,7 @@ jobs: name: ${{ matrix.environment }} steps: - - uses: actions/checkout@v6.0.0 + - uses: actions/checkout@v6.0.2 with: fetch-depth: 0 diff --git a/.github/workflows/deploy-daac-test.yml b/.github/workflows/deploy-daac-test.yml index edd61597c..c9104bc47 100644 --- a/.github/workflows/deploy-daac-test.yml +++ b/.github/workflows/deploy-daac-test.yml @@ -44,7 +44,7 @@ jobs: name: ${{ matrix.environment }} steps: - - uses: actions/checkout@v6.0.0 + - uses: actions/checkout@v6.0.2 with: fetch-depth: 0 diff --git a/.github/workflows/deploy-jth-sandbox.yml b/.github/workflows/deploy-edc-sandbox.yml similarity index 89% rename from .github/workflows/deploy-jth-sandbox.yml rename to .github/workflows/deploy-edc-sandbox.yml index ed3056476..fbe25deed 100644 --- a/.github/workflows/deploy-jth-sandbox.yml +++ b/.github/workflows/deploy-edc-sandbox.yml @@ -1,9 +1,12 @@ -name: Deploy jth sandbox to AWS +name: Deploy HyP3 EDC Sandbox to AWS + +permissions: + contents: read on: push: branches: - - hyp3-jth-sandbox + - 
hyp3-edc-sandbox concurrency: ${{ github.workflow }}-${{ github.ref }} @@ -14,13 +17,13 @@ jobs: fail-fast: false matrix: include: - - environment: hyp3-jth-sandbox + - environment: hyp3-edc-sandbox template_bucket: cf-templates-bywc0durdnqy-us-west-2 image_tag: test product_lifetime_in_days: 14 default_credits_per_user: 0 default_application_status: APPROVED - cost_profile: DEFAULT + cost_profile: EDC opera_rtc_s1_end_date: Default job_files: >- job_spec/AUTORIFT.yml @@ -36,24 +39,24 @@ jobs: required_surplus: 0 security_environment: EDC ami_id: /ngap/amis/image_id_ecs_al2023_x86 - distribution_url: '' + distribution_url: 'https://d3bvvghf83wjqc.cloudfront.net' environment: name: ${{ matrix.environment }} steps: - - uses: actions/checkout@v4.2.2 + - uses: actions/checkout@v6.0.2 with: fetch-depth: 0 - - uses: aws-actions/configure-aws-credentials@v4 + - uses: aws-actions/configure-aws-credentials@v5 with: aws-access-key-id: ${{ secrets.V2_AWS_ACCESS_KEY_ID }} aws-secret-access-key: ${{ secrets.V2_AWS_SECRET_ACCESS_KEY }} aws-session-token: ${{ secrets.V2_AWS_SESSION_TOKEN }} aws-region: ${{ secrets.AWS_REGION }} - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.13 diff --git a/.github/workflows/deploy-plus-prod.yml b/.github/workflows/deploy-plus-prod.yml index c3a21ae5e..070523784 100644 --- a/.github/workflows/deploy-plus-prod.yml +++ b/.github/workflows/deploy-plus-prod.yml @@ -40,7 +40,7 @@ jobs: url: https://${{ matrix.domain }} steps: - - uses: actions/checkout@v6.0.0 + - uses: actions/checkout@v6.0.2 with: fetch-depth: 0 diff --git a/.github/workflows/deploy-plus-test.yml b/.github/workflows/deploy-plus-test.yml index ada84df0c..c20000506 100644 --- a/.github/workflows/deploy-plus-test.yml +++ b/.github/workflows/deploy-plus-test.yml @@ -41,7 +41,7 @@ jobs: url: https://${{ matrix.domain }} steps: - - uses: actions/checkout@v6.0.0 + - uses: actions/checkout@v6.0.2 with: fetch-depth: 0 diff --git a/.github/workflows/static-analysis.yml b/.github/workflows/static-analysis.yml index 306b7abc3..a0b067fb4 100644 --- a/.github/workflows/static-analysis.yml +++ b/.github/workflows/static-analysis.yml @@ -3,7 +3,13 @@ name: Static code analysis permissions: contents: read -on: push +on: + push: + pull_request: + branches: + - main + - develop + env: SETUPTOOLS_SCM_PRETEND_VERSION_FOR_HYP3: v0.0.0 @@ -23,7 +29,7 @@ jobs: matrix: security_environment: [ASF, EDC, JPL, JPL-public] steps: - - uses: actions/checkout@v6.0.0 + - uses: actions/checkout@v6.0.2 - uses: actions/setup-python@v6 with: python-version: 3.13 @@ -37,7 +43,7 @@ jobs: openapi-spec-validator: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6.0.0 + - uses: actions/checkout@v6.0.2 - uses: actions/setup-python@v6 with: python-version: 3.13 @@ -50,7 +56,7 @@ jobs: statelint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v6.0.0 + - uses: actions/checkout@v6.0.2 - uses: ruby/setup-ruby@v1 with: ruby-version: 2.7 @@ -69,8 +75,9 @@ jobs: snyk: runs-on: ubuntu-latest + if: ${{ ! 
github.event.pull_request.head.repo.fork }}
     steps:
-      - uses: actions/checkout@v6.0.0
+      - uses: actions/checkout@v6.0.2
       - uses: snyk/actions/setup@v1.0.0
       - uses: actions/setup-python@v6
         with:
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index baaa6a2af..960e03bf7 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -1,6 +1,11 @@
 name: Run tests

-on: push
+on:
+  push:
+  pull_request:
+    branches:
+      - main
+      - develop

 env:
   SETUPTOOLS_SCM_PRETEND_VERSION_FOR_HYP3: v0.0.0
@@ -10,7 +15,7 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v6.0.0
+      - uses: actions/checkout@v6.0.2

       - uses: actions/setup-python@v6
         with:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d6c430477..4ca364b1c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,32 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

+## [10.13.0]
+
+### Added
+- Added optional `chip_size` and `search_range` parameters to the `ARIA_AUTORIFT.yml` job specification to enable a user-defined chip size and search range.
+- Updated the `AUTORIFT.yml`, `ARIA_AUTORIFT.yml`, and `ITS_LIVE_AUTORIFT.yml` validation schemas to support processing of Sentinel-1D and Sentinel-2C/D granules.
+- Added a `model_context_length` parameter to the `OPERA_DIST_S1` job specification.
+- Added a new custom hyp3-slimsar-test deployment.
+- Added a new `SLIMSAR_TDBP` job spec for SlimSAR time-domain backprojection processing.
+- Added a new `SlimSAR` compute environment for SlimSAR processing with appropriate EC2 instance types.
+- Added a new `ITS_LIVE_CROP_BULK` job spec, which re-crops existing ITS_LIVE products specified in a parquet file to ensure they are chunk-aligned and have a time dimension, and then generates STAC JSON and other metadata files.
+- Added a new `ITS_LIVE_META_BULK` job spec, which generates STAC JSON and other metadata files for existing ITS_LIVE products specified in a parquet file.
+- Added the `ITS_LIVE_CROP_BULK` and `ITS_LIVE_META_BULK` job specs to the ITS_LIVE deployments.
+- Added the `stac_items_endpoint` and `stac_exists_ok` job parameters to the `ITS_LIVE_AUTORIFT` and `ITS_LIVE_CROP` job specs to allow directly publishing STAC items to the ITS_LIVE STAC catalog.
+
+### Changed
+- Increased the maximum `stride_for_norm_param_estimation` for the `OPERA_DIST_S1` job specification from 16 to 32 to handle models with a larger input size.
+- Updated the README to reflect the current state of AWS, explain the motivation for deploying a HyP3 stack, and clarify naming and formatting.
+- The `ITS_LIVE_META` job spec has been renamed to `ITS_LIVE_CROP`, as it re-crops an existing ITS_LIVE product to ensure it is chunk-aligned and has a time dimension, and then generates STAC JSON and other metadata files.
+- The hyp3-ci stack permissions for JPL deployments were expanded to support deploying HyP3-based monitoring stacks:
+  - Listing CloudFormation stacks is now allowed.
+  - CloudFormation permissions were expanded from just `us-west-2` to any region.
+  - ECR actions are now allowed.
+
+### Removed
+- Removed the `publish_stac_prefix` job parameter from the `ITS_LIVE_AUTORIFT` and `ITS_LIVE_CROP` (previously `ITS_LIVE_META`) job specs, as it's no longer used by the ITS_LIVE plugins.
+
 ## [10.12.1]

 ### Fixed
diff --git a/README.md b/README.md
index 81f4bca06..87ac49f89 100644
--- a/README.md
+++ b/README.md
@@ -43,6 +43,10 @@ A processing environment for HyP3 Plugins in AWS.

 ## Deployment

+### Why would you set up a HyP3 deployment?
+
+A HyP3 deployment stack provides a reproducible cloud processing environment that bundles AWS infrastructure, execution logic, and cost controls, enabling scalable, on-demand computation with clear operational costs.
+
 > [!IMPORTANT]
 > It's not currently possible to deploy HyP3 fully independent of ASF due to our integration with
 > [ASF Vertex](https://search.alaska.edu). If you'd like your own deployment of HyP3, please open an issue here or
@@ -59,6 +63,10 @@ also referred to as "security environments" throughout our code and docs
 - JPL
 - JPL-public

+For EDC, you will also need to refer to our
+[Deploy HyP3 to Earthdata Cloud](https://github.com/ASFHyP3/.github-private/blob/main/docs/Deploy-HyP3-to-Earthdata-Cloud.md)
+internal docs article (only accessible to members of ASF).
+
 > [!IMPORTANT]
 > JPL deployments _must_ start with the JPL security environment, but can be migrated to `JPL-public`
 > after they are fully deployed and approved to have a public bucket.
@@ -67,19 +75,6 @@ For JPL, these deployment docs assume that:
 - the JPL account was set up in the "default" manner by the JPL cloud team
 - the developer deploying the account is able to log in with the `power_user` role

-For a new EDC deployment, you need the following items (not necessarily a comprehensive list):
-- SSL certificate in AWS Certificate Manager for custom CloudFront domain name
-- ID of the CloudFront Origin Access Identity used to access data in S3
-
-EDC UAT/prod deployment steps are not fully documented here.
-When deploying HyP3 to a new EDC account for the first time, you should also refer to the
-[SOP for deploying HyP3 to EDC](https://asfdaac.atlassian.net/wiki/spaces/ST/pages/2290319361/SOP-ASF-DAAC-EDC-011).
-You should then be able to deploy additional copies of HyP3 to an EDC Sandbox account
-by following this README alone.
-
-After deploying HyP3 to an EDC Sandbox account, you'll need to follow our documentation on
-[Accessing Private API Gateways in Earthdata Cloud](https://github.com/ASFHyP3/.github-private/blob/main/docs/Accessing-Private-API-Gateways-in-Earthdata-Cloud.md).
-
 > [!TIP]
 > You can expand and collapse details specific to a security environment as you go through this README.
 > Make sure you're looking at the details for the security environment you're deploying into!
@@ -135,7 +130,7 @@ In order to integrate an ASF deployment we'll need:

 These can be done by deploying the [ASF CI stack](cicd-stacks/ASF-deployment-ci-cf.yml).

-*Warning: This stack should only be deployed once per AWS account. This stack also
+*Warning: This stack only needs to be deployed once per AWS account. This stack also
 assumes you are only deploying into a single AWS Region. If you are deploying into
 multiple regions in the same AWS account, you'll need to adjust the IAM permissions
 that are limited to a single region.*
@@ -155,7 +150,10 @@ Once the `github-actions` IAM user has been created, you can create an AWS acces
 which we will use to deploy HyP3 via CI/CD tooling:

 1. Go to AWS console -> IAM -> Users -> github-actions -> security credentials tab -> "create access key".
-2. Store the access key ID and secret access key using your team's password manager.
+2. Select "Other" for key usage
+3. (Optional) Add a tag value describing the key, such as "For GitHub Actions CI/CD pipelines"
+4. Store the access key ID and secret access key using your team's password manager. You will use them below in "Create the GitHub environment"
+   as `V2_AWS_ACCESS_KEY_ID` and `V2_AWS_SECRET_ACCESS_KEY`.
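+
+Alternatively, you can create the access key with the AWS CLI. This is a minimal sketch, assuming you already have admin credentials for the account configured locally; the console steps above are the canonical path:
+
+```
+# Create an access key for the github-actions IAM user
+# (the key ID and secret are only returned once, so store them immediately)
+aws iam create-access-key --user-name github-actions
+```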
@@ -193,10 +191,7 @@ aws cloudformation deploy \ --template-file cicd-stacks/JPL-deployment-policy-cf.yml ``` -*Warning: This stack should only be deployed once per AWS account. This stack also -assumes you are only deploying into a single AWS Region. If you are deploying into -multiple regions in the same AWS account, you'll need to adjust the IAM permissions -that are limited to a single region.* +*Warning: This stack should only be deployed once per AWS account.* Then open a [Cloud Team Service Desk](https://itsd-jira.jpl.nasa.gov/servicedesk/customer/portal/13) request for a service user account here: @@ -232,60 +227,33 @@ you will need to create an Earthdata Login user for your deployment if you do no Go to AWS console -> Secrets Manager, then: 1. Click the orange "Store a new secret" button -1. For "Secret Type" select "Other type of secret" -1. Enter all required secret key-value pairs. Notably, the keys should be the secret names as listed (case-sensitive) in the [job specs](./job_spec/) that will be deployed -1. Click the orange "Next" button -1. Give the secret the same name that you plan to give to the HyP3 CloudFormation stack when you deploy it (below) -1. Click the orange "Next" button -1. Click the orange "Next" button (we won't configure rotation) -1. Click the orange "Store" button to save the Secret - -#### Upload SSL cert - -> [!WARNING] -> This step must be done by an ASF employee. - -To allow HTTPS connections, HyP3 needs an SSL certificate that is valid for its deployment domain name (URL). - -If HyP3 is being deployed to an ASF-managed AWS account, we can use the master certificate that covers all -`*.asf.alaska.edu` domains. Otherwise, we'll need a deployment specific certificate. - -*Important: Skip this step for EDC Sandbox deployments.* - -
-ASF-managed AWS account: Upload the ASF master SSL certificate -
- -Upload the `*.asf.alaska.edu` SSL certificate to AWS Certificate Manager (ACM): - -1. AWS console -> Certificate Manager (ACM) -> import certificate -1. Open https://gitlab.asf.alaska.edu/operations/puppet/-/tree/production/modules/certificates/files - 1. The contents of the `asf.alaska.edu.cer` file go in "Certificate body" - 1. The contents of the `asf.alaska.edu.key` file go in "Certificate private key" - 1. The contents of the `intermediates.pem` file go in "Certificate chain" -
- -
-Externally-managed AWS account (e.g., JPL, EDC, CloudBank): Request and upload deployment specific SSL certificate -
- -Submit a Platform request in ASF JIRA for a new certificate, including the domain name -(e.g. `hyp3-foobar.asf.alaska.edu`). - -Once you receive the certificate's private key and links to download the certificate in various formats, -download these files: -1. Certificate Body (the "as Certificate Only, PEM encoded" link from the email) -2. Certificate Private Key (from the Platform team; typically in MatterMost) -3. Certificate Chain (the "as Root/Intermediate(s) only, PEM encoded" link from the email) - -and then upload them to AWS Certificate Manager (ACM): - -1. AWS console -> Certificate Manager (ACM) -> import certificate - 1. The contents of (1) above goes in Certificate body - 1. The contents of (2) above goes in Certificate private key - 1. The contents of (3) above goes in Certificate chain - -
+2. For "Secret Type" select "Other type of secret"
+3. Enter all required secret key-value pairs. Notably, the keys should be the secret names as listed (case-sensitive) in the [job specs](./job_spec/) that will be deployed
+4. Click the orange "Next" button
+5. Give the secret the same name that you plan to give to the HyP3 CloudFormation stack when you deploy it (below)
+6. Click the orange "Next" button
+7. Click the orange "Next" button (we won't configure rotation)
+8. Click the orange "Store" button to save the Secret
+
+#### Request SSL cert
+
+To allow HTTPS connections, HyP3 needs an SSL certificate that is valid for its deployment domain name (URL), which we can request from AWS.
+
+> [!NOTE]
+> For EDC accounts, you should create the cert in the `us-east-1` region
+> for use with the CloudFront distribution that you will create later,
+> even if you're deploying HyP3 to `us-west-2`.
+
+Go to the AWS console -> AWS Certificate Manager -> Request certificate and then:
+1. Select "Request a public certificate"
+2. Click the orange "Next" button
+3. Choose a "Fully qualified domain name". The domain name should be something like `hyp3-foobar.asf.alaska.edu`, or `hyp3-foobar-test.asf.alaska.edu` for a test deployment.
+4. Choose "DNS validation"
+5. Copy the "CNAME name" and "CNAME value"
+
+Then create a validation record in
+https://gitlab.asf.alaska.edu/operations/puppet/-/edit/production/modules/legacy_dns/files/asf.alaska.edu.db
+of the form `<CNAME_name> in CNAME <CNAME_value>`, stripping `.asf.alaska.edu` from the `CNAME_name` (see previous records for examples).

 ### Create the GitHub environment

@@ -294,30 +262,30 @@ and then upload them to AWS Certificate Manager (ACM):
 > repository, which is generally only possible for ASF employees on HyP3 development teams.

 1. Go to https://github.com/ASFHyP3/hyp3/settings/environments -> New Environment
-2. Check "required reviewers" and add the appropriate team(s) or user(s)
-3. Change "Deployment branches and tags" to "Selected branches and tags" and
+2. Name the environment after your chosen domain name, e.g. `hyp3-foobar` or `hyp3-foobar-test`
+3. Check "required reviewers" and add the appropriate team(s) or user(s)
+4. Change "Deployment branches and tags" to "Selected branches and tags" and
    - add a deployment branch or tag rule
    - use "Ref Type: Branch" and write the name of the branch it will be deploying out of. (This is typically `main` for prod deployments, `develop` for test deployments, or a feature branch name for sandbox deployments.)
-4. Add the following environment secrets:
+5. Add the following environment secrets:
    - `AWS_REGION` - e.g. `us-west-2`
-   - `BUCKET_READ_PRINCIPALS` (EDC only) - List of AWS IAM principals granted read access to data in S3 for Earthdata Cloud deployments. For EDC Sandbox deployments, if you don't know what to put here, you can simply set it to `arn:aws:iam::<account>:root`, where `<account>` is the AWS account ID for the EDC Sandbox account.
-   - `CERTIFICATE_ARN` (ASF and JPL only) - ARN of the AWS Certificate Manager certificate that you imported manually (aws console -> certificate manager -> list certificates, e.g. `arn:aws:acm:us-west-2:xxxxxxxxxxxx:certificate/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx`)
+   - `CERTIFICATE_ARN` (ASF and JPL only) - ARN of the AWS Certificate Manager certificate that you created manually, e.g. `arn:aws:acm:us-west-2:XXXXXXXXXXXX:certificate/XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX`
    - `CLOUDFORMATION_ROLE_ARN` (ASF only) - part of the `hyp3-ci` stack that you deployed, e.g. `arn:aws:iam::xxxxxxxxxxxx:role/hyp3-ci-CloudformationDeploymentRole-XXXXXXXXXXXXX`
-   - `SECRET_ARN` - ARN for the AWS Secrets Manager Secret that you created manually
+   - `SECRET_ARN` - ARN for the AWS Secrets Manager Secret that you created manually, e.g. `arn:aws:secretsmanager:us-west-X:XXXXXXXXXXXX:secret:hyp3-foobar-XXXXXX`
    - `V2_AWS_ACCESS_KEY_ID` - AWS access key ID:
-     - ASF: for the `github-actions` user
+     - ASF: for the `github-actions` user (created in the "Enable CI/CD" step above)
      - JPL: for the service user
      - EDC: created by an ASF developer via Kion
    - `V2_AWS_SECRET_ACCESS_KEY` - The corresponding secret access key
-   - `VPC_ID` - ID of the default VPC for this AWS account and region (aws console -> vpc -> your VPCs, e.g. `vpc-xxxxxxxxxxxxxxxxx`)
-   - `SUBNET_IDS` - Comma delimited list (no spaces) of the default subnets for the VPC specified in `VPC_ID` (aws console -> vpc -> subnets, e.g. `subnet-xxxxxxxxxxxxxxxxx,subnet-xxxxxxxxxxxxxxxxx,subnet-xxxxxxxxxxxxxxxxx,subnet-xxxxxxxxxxxxxxxxx`)
+   - `VPC_ID` - ID of the default VPC for this AWS account and region (aws console -> VPC -> Your VPCs, e.g. `vpc-xxxxxxxxxxxxxxxxx`)
+   - `SUBNET_IDS` - Comma-delimited list (no spaces) of the default subnets for the VPC specified in `VPC_ID` (aws console -> VPC -> Subnets, e.g. `subnet-xxxxxxxxxxxxxxxxx,subnet-xxxxxxxxxxxxxxxxx,subnet-xxxxxxxxxxxxxxxxx,subnet-xxxxxxxxxxxxxxxxx`)

 ### Create the HyP3 deployment

-You will need to add the deployment to the matrix in an existing GitHub Actions `deploy-*.yml` workflow or create
+You will need to add the deployment to the matrix in an existing GitHub Actions `deploy-*.yml` workflow located in the `.github/workflows/` directory, or create
 a new one for the deployment. If you need to create a new one, we recommend copying one of the
-`deploy-*-sandbox.yml` workflows, and then updating all of the fields (`environment`, `domain`, `template_bucket`, etc.)
+existing workflows, and then updating all of the fields as appropriate for your deployment. Also make sure to update the top-level `name` of the workflow and the name
 of the branch to deploy from. (This is typically `main` for prod deployments, `develop` for test deployments, or a feature branch name for sandbox deployments.)

@@ -325,6 +293,9 @@
 > If you're deploying from a feature branch, make sure to [protect](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/about-protected-branches)
 > it from accidental deletion.

+> [!TIP]
+> If your CI/CD workflow fails, delete the "Rolled Back" stack (AWS console -> CloudFormation -> Stacks) before re-running the failed job.
+
 The deployment workflow will run as soon as you merge your changes into the branch specified in the workflow file.

 ### Finishing touches

@@ -336,12 +307,11 @@ Once HyP3 is deployed, there are a few follow on tasks you may need to do for a
 > [!WARNING]
 > This step must be done by an ASF employee.

-*Important: Skip this step for EDC Sandbox deployments.*
-
 Open a PR adding a line to
 https://gitlab.asf.alaska.edu/operations/puppet/-/blob/production/modules/legacy_dns/files/asf.alaska.edu.db
-for the new custom domain name (AWS console -> api gateway -> custom domain names -> "API Gateway domain name").
+for the new custom domain name (AWS console -> API Gateway -> Custom domain names -> "API Gateway domain name") of the format
+`hyp3-foobar in CNAME <API Gateway domain name>.`. Follow similar examples.
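+
+For example, if your deployment's API Gateway domain name were `d-abc123xyz.execute-api.us-west-2.amazonaws.com` (a hypothetical placeholder; copy the real value from the console), the new zone file line might look like:
+
+```
+hyp3-foobar in CNAME d-abc123xyz.execute-api.us-west-2.amazonaws.com.
+```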
-Ask the Platform team in the `~development-support` channel in Mattermost to review/merge the PR.
+Ask someone from ASF support to review/merge the PR.

 Changes should take effect within 15-60 minutes after merging. Confirm that a Swagger UI is available at your chosen API URL.

@@ -358,6 +328,20 @@ Update the [AWS Accounts and HyP3 Deployments](https://docs.google.com/spreadshe
 > ```
 > Remember to remove this after the DNS PR is merged!

+#### Testing and adding user credits to your HyP3 deployment
+
+After you've successfully deployed HyP3 and your new DNS record has taken effect (or you've edited your local DNS name resolution), you can test your
+deployment by accessing the Swagger UI and using the GET `/user` endpoint to check whether your user is approved and has credits for running jobs on the
+deployment. You will need to be authenticated, either by providing an Earthdata Login Bearer Token using the "Authorize" button, or by having a
+valid `asf-urs` browser cookie, typically obtained by logging into [Vertex](https://search.asf.alaska.edu). Interacting with HyP3 should
+automatically add your user to the DynamoDB table with the default number of credits (typically 0).
+
+To add credits to your (or any) user, log in to the AWS console and navigate to DynamoDB -> Explore items, then:
+1. Find the table with a name like `hyp3-foobar-UsersTable-XXXXXXXXXXXXX`
+2. Edit your user record if present (it is created the first time you interact with the API) or duplicate an existing record, updating the `user_id` (see the CLI sketch below for an alternative).
+
+You can then return to the Swagger UI and use POST `/jobs` to run a test job and confirm it completes.
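+
+As an alternative to editing records in the console, you can set a user's credits with the AWS CLI. This is a minimal sketch, assuming the table's partition key is `user_id` and the credits attribute is named `remaining_credits`; check an existing record in your table to confirm the attribute names before running it:
+
+```
+aws dynamodb update-item \
+    --table-name hyp3-foobar-UsersTable-XXXXXXXXXXXXX \
+    --key '{"user_id": {"S": "myuser"}}' \
+    --update-expression 'SET remaining_credits = :credits' \
+    --expression-attribute-values '{":credits": {"N": "100"}}'
+```
+
 #### Optional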
diff --git a/cicd-stacks/JPL-deployment-policy-cf.yml b/cicd-stacks/JPL-deployment-policy-cf.yml index c5f18a7f5..c3f06f86b 100644 --- a/cicd-stacks/JPL-deployment-policy-cf.yml +++ b/cicd-stacks/JPL-deployment-policy-cf.yml @@ -18,6 +18,7 @@ Resources: - dynamodb:* - ec2:* - ecs:* + - ecr:GetAuthorizationToken - events:* - iam:CreateServiceLinkedRole - iam:DeleteServiceLinkedRole @@ -49,6 +50,7 @@ Resources: - Effect: Allow Action: - cloudformation:SetStackPolicy + - cloudformation:ListStacks - cloudformation:CreateStack - cloudformation:UpdateStack - cloudformation:DeleteStack @@ -57,7 +59,21 @@ Resources: - cloudformation:ExecuteChangeSet - cloudformation:DeleteChangeSet - cloudformation:GetTemplateSummary - Resource: !Sub "arn:aws:cloudformation:${AWS::Region}:${AWS::AccountId}:stack/*" + Resource: !Sub "arn:aws:cloudformation:*:${AWS::AccountId}:stack/*" + + - Effect: Allow + Action: + - ecr:BatchCheckLayerAvailability + - ecr:GetDownloadUrlForLayer + - ecr:DescribeRepositories + - ecr:ListImages + - ecr:DescribeImages + - ecr:BatchGetImage + - ecr:InitiateLayerUpload + - ecr:UploadLayerPart + - ecr:CompleteLayerUpload + - ecr:PutImage + Resource: !Sub "arn:aws:ecr:*:${AWS::AccountId}:repository/*" ApiGatewayLoggingRole: Type: Custom::JplRole diff --git a/job_spec/ARIA_AUTORIFT.yml b/job_spec/ARIA_AUTORIFT.yml index eef6a2f60..8f2276e22 100644 --- a/job_spec/ARIA_AUTORIFT.yml +++ b/job_spec/ARIA_AUTORIFT.yml @@ -14,7 +14,7 @@ AUTORIFT: anyOf: - description: The name of the Sentinel-1 IW SLC granule to process type: string - pattern: "^S1[ABC]_IW_SLC__1S[SD][VH]" + pattern: "^S1[ABCD]_IW_SLC__1S[SD][VH]" minLength: 67 maxLength: 67 example: S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8 @@ -26,7 +26,7 @@ AUTORIFT: example: S1_136231_IW2_20200604T022312_VV_7C85-BURST - description: The name of the Sentinel-2 granule to process (ESA naming convention) type: string - pattern: "^S2[AB]_MSIL1C_" + pattern: "^S2[ABCD]_MSIL1C_" minLength: 60 maxLength: 60 example: S2A_MSIL1C_20200627T150921_N0209_R025_T22WEB_20200627T170912 @@ -49,7 +49,7 @@ AUTORIFT: anyOf: - description: The name of the Sentinel-1 IW SLC granule to process type: string - pattern: "^S1[ABC]_IW_SLC__1S[SD][VH]" + pattern: "^S1[ABCD]_IW_SLC__1S[SD][VH]" minLength: 67 maxLength: 67 example: S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8 @@ -61,7 +61,7 @@ AUTORIFT: example: S1_136231_IW2_20200604T022312_VV_7C85-BURST - description: The name of the Sentinel-2 granule to process (ESA naming convention) type: string - pattern: "^S2[AB]_MSIL1C_" + pattern: "^S2[ABCD]_MSIL1C_" minLength: 60 maxLength: 60 example: S2A_MSIL1C_20200627T150921_N0209_R025_T22WEB_20200627T170912 @@ -85,13 +85,13 @@ AUTORIFT: anyOf: - description: The name of the Sentinel-1 SLC granule to process type: string - pattern: "^S1[ABC]_IW_SLC__1S[SD][VH]" + pattern: "^S1[ABCD]_IW_SLC__1S[SD][VH]" minLength: 67 maxLength: 67 example: S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8 - description: The name of the Sentinel-2 granule to process (ESA naming convention) type: string - pattern: "^S2[AB]_MSIL1C_" + pattern: "^S2[ABCD]_MSIL1C_" minLength: 60 maxLength: 60 example: S2A_MSIL1C_20200627T150921_N0209_R025_T22WEB_20200627T170912 @@ -106,6 +106,20 @@ AUTORIFT: description: Shapefile for determining the correct search parameters by geographic location. Path to shapefile must be understood by GDAL. 
type: string default: '/vsicurl/https://its-live-data.s3.amazonaws.com/autorift_parameters/v001/autorift_solidearth_0120m.shp' + chip_size: + api_schema: + description: Custom chip size in pixels for AutoRIFT (overrides parameter_file if provided) + type: integer + nullable: true + default: null + example: 32 + search_range: + api_schema: + description: Custom search range in pixels for AutoRIFT (overrides parameter_file if provided) + type: integer + nullable: true + default: null + example: 64 cost_profiles: DEFAULT: cost: 1.0 @@ -129,6 +143,10 @@ AUTORIFT: - Ref::reference - --secondary - Ref::secondary + - --chip-size + - Ref::chip_size + - --search-range + - Ref::search_range timeout: 10800 compute_environment: AriaAutorift vcpu: 1 diff --git a/job_spec/AUTORIFT.yml b/job_spec/AUTORIFT.yml index 358507aa4..03f6efd30 100644 --- a/job_spec/AUTORIFT.yml +++ b/job_spec/AUTORIFT.yml @@ -14,13 +14,13 @@ AUTORIFT: anyOf: - description: The name of the Sentinel-1 SLC granule to process type: string - pattern: "^S1[ABC]_IW_SLC__1S[SD][VH]" + pattern: "^S1[ABCD]_IW_SLC__1S[SD][VH]" minLength: 67 maxLength: 67 example: S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8 - description: The name of the Sentinel-2 granule to process (ESA naming convention) type: string - pattern: "^S2[AB]_MSIL1C_" + pattern: "^S2[ABCD]_MSIL1C_" minLength: 60 maxLength: 60 example: S2A_MSIL1C_20200627T150921_N0209_R025_T22WEB_20200627T170912 diff --git a/job_spec/ITS_LIVE_AUTORIFT.yml b/job_spec/ITS_LIVE_AUTORIFT.yml index 7561fab85..247fb8bea 100644 --- a/job_spec/ITS_LIVE_AUTORIFT.yml +++ b/job_spec/ITS_LIVE_AUTORIFT.yml @@ -14,7 +14,7 @@ AUTORIFT: anyOf: - description: The name of the Sentinel-1 IW SLC granule to process type: string - pattern: "^S1[ABC]_IW_SLC__1S[SD][VH]" + pattern: "^S1[ABCD]_IW_SLC__1S[SD][VH]" minLength: 67 maxLength: 67 example: S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8 @@ -26,7 +26,7 @@ AUTORIFT: example: S1_136231_IW2_20200604T022312_VV_7C85-BURST - description: The name of the Sentinel-2 granule to process (ESA naming convention) type: string - pattern: "^S2[AB]_MSIL1C_" + pattern: "^S2[ABCD]_MSIL1C_" minLength: 60 maxLength: 60 example: S2A_MSIL1C_20200627T150921_N0209_R025_T22WEB_20200627T170912 @@ -49,7 +49,7 @@ AUTORIFT: anyOf: - description: The name of the Sentinel-1 IW SLC granule to process type: string - pattern: "^S1[ABC]_IW_SLC__1S[SD][VH]" + pattern: "^S1[ABCD]_IW_SLC__1S[SD][VH]" minLength: 67 maxLength: 67 example: S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8 @@ -61,7 +61,7 @@ AUTORIFT: example: S1_136231_IW2_20200604T022312_VV_7C85-BURST - description: The name of the Sentinel-2 granule to process (ESA naming convention) type: string - pattern: "^S2[AB]_MSIL1C_" + pattern: "^S2[ABCD]_MSIL1C_" minLength: 60 maxLength: 60 example: S2A_MSIL1C_20200627T150921_N0209_R025_T22WEB_20200627T170912 @@ -85,13 +85,13 @@ AUTORIFT: anyOf: - description: The name of the Sentinel-1 SLC granule to process type: string - pattern: "^S1[ABC]_IW_SLC__1S[SD][VH]" + pattern: "^S1[ABCD]_IW_SLC__1S[SD][VH]" minLength: 67 maxLength: 67 example: S1A_IW_SLC__1SSV_20150621T120220_20150621T120232_006471_008934_72D8 - description: The name of the Sentinel-2 granule to process (ESA naming convention) type: string - pattern: "^S2[AB]_MSIL1C_" + pattern: "^S2[ABCD]_MSIL1C_" minLength: 60 maxLength: 60 example: S2A_MSIL1C_20200627T150921_N0209_R025_T22WEB_20200627T170912 @@ -108,7 +108,7 @@ AUTORIFT: default: 
'/vsicurl/https://its-live-data.s3.amazonaws.com/autorift_parameters/v001/autorift_landice_0120m.shp' publish_bucket: api_schema: - description: Publish the resulting product to the ITS_LIVE AWS Open Data (or test) S3 Bucket + description: Publish the resulting product files to the ITS_LIVE AWS Open Data (or test) S3 Bucket type: string nullable: true enum: @@ -116,11 +116,6 @@ AUTORIFT: - "its-live-data" - "its-live-data-test" default: null - publish_stac_prefix: - api_schema: - description: Publish the resulting STAC JSON item under this prefix in the ITS_LIVE AWS Open Data (or test) S3 Bucket - type: string - default: stac-ingest use_static_files: api_schema: description: Use static topographic correction files for ISCE3 processing if available (Sentinel-1 only) @@ -132,6 +127,18 @@ AUTORIFT: default: null nullable: true type: string + stac_items_endpoint: + api_schema: + description: STAC items endpoint URL for the collection you want to add items to + type: string + nullable: true + format: uri + default: null + stac_exists_ok: + api_schema: + description: Allows updating existing STAC items + type: boolean + default: false cost_profiles: DEFAULT: cost: 1.0 @@ -171,18 +178,23 @@ AUTORIFT: - name: METADATA image: ghcr.io/asfhyp3/itslive-metadata command: + - ++plugin + - meta - --bucket - '!Ref Bucket' - --bucket-prefix - Ref::job_id - --publish-bucket - Ref::publish_bucket - - --publish-prefix - - Ref::publish_stac_prefix + - --stac-items-endpoint + - Ref::stac_items_endpoint + - --stac-exists-ok + - Ref::stac_exists_ok timeout: 10800 compute_environment: ItsLiveMeta vcpu: 1 memory: 7875 secrets: + - STAC_API_TOKEN - PUBLISH_ACCESS_KEY_ID - PUBLISH_SECRET_ACCESS_KEY diff --git a/job_spec/ITS_LIVE_META.yml b/job_spec/ITS_LIVE_CROP.yml similarity index 70% rename from job_spec/ITS_LIVE_META.yml rename to job_spec/ITS_LIVE_CROP.yml index d079361fd..b34790092 100644 --- a/job_spec/ITS_LIVE_META.yml +++ b/job_spec/ITS_LIVE_CROP.yml @@ -1,4 +1,4 @@ -ITS_LIVE_META: +ITS_LIVE_CROP: required_parameters: - granule_uri parameters: @@ -9,7 +9,7 @@ ITS_LIVE_META: pattern: '^s3:\/\/.*\.nc$' publish_bucket: api_schema: - description: Publish the resulting metadata files to the ITS_LIVE AWS Open Data (or test) S3 Bucket + description: Publish the resulting product files to the ITS_LIVE AWS Open Data (or test) S3 Bucket type: string nullable: true enum: @@ -17,11 +17,18 @@ ITS_LIVE_META: - "its-live-data" - "its-live-data-test" default: null - publish_stac_prefix: + stac_items_endpoint: api_schema: - description: Publish the resulting STAC JSON item under this prefix in the ITS_LIVE AWS Open Data (or test) S3 Bucket + description: STAC items endpoint URL for the collection you want to add items to type: string - default: stac-ingest + nullable: true + format: uri + default: null + stac_exists_ok: + api_schema: + description: Allows updating existing STAC items + type: boolean + default: true cost_profiles: DEFAULT: cost: 1.0 @@ -49,6 +56,8 @@ ITS_LIVE_META: - name: METADATA image: ghcr.io/asfhyp3/itslive-metadata command: + - ++plugin + - meta - --granule-uri - Ref::granule_uri - --bucket @@ -57,12 +66,15 @@ ITS_LIVE_META: - Ref::job_id - --publish-bucket - Ref::publish_bucket - - --publish-prefix - - Ref::publish_stac_prefix + - --stac-items-endpoint + - Ref::stac_items_endpoint + - --stac-exists-ok + - Ref::stac_exists_ok timeout: 10800 compute_environment: ItsLiveMeta vcpu: 1 memory: 7875 secrets: + - STAC_API_TOKEN - PUBLISH_ACCESS_KEY_ID - PUBLISH_SECRET_ACCESS_KEY diff --git 
a/job_spec/ITS_LIVE_CROP_BULK.yml b/job_spec/ITS_LIVE_CROP_BULK.yml
new file mode 100644
index 000000000..398cae7f5
--- /dev/null
+++ b/job_spec/ITS_LIVE_CROP_BULK.yml
@@ -0,0 +1,85 @@
+ITS_LIVE_CROP_BULK:
+  required_parameters:
+    - granules_parquet
+  parameters:
+    granules_parquet:
+      api_schema:
+        description: S3 URI for a parquet file specifying the existing ITS_LIVE products to re-crop and generate metadata for
+        type: string
+        pattern: '^s3:\/\/.*\.parquet$'
+    start_idx:
+      api_schema:
+        description: Start index (inclusive) of the contiguous subset of granules in granules_parquet to process
+        type: integer
+        minimum: 0
+        default: 0
+    stop_idx:
+      api_schema:
+        description: Stop index (exclusive) of the contiguous subset of granules in granules_parquet to process
+        type: integer
+        nullable: true
+        minimum: 1
+        default: null
+    publish_bucket:
+      api_schema:
+        description: Publish the resulting product files to the ITS_LIVE AWS Open Data (or test) S3 Bucket
+        type: string
+        nullable: true
+        enum:
+          - null
+          - "its-live-data"
+          - "its-live-data-test"
+        default: null
+  cost_profiles:
+    DEFAULT:
+      cost: 1.0
+  validators: []
+  steps:
+    - name: ''
+      image: ghcr.io/asfhyp3/hyp3-autorift
+      command:
+        - ++process
+        - bulk_crop_netcdf_product
+        - --granules-parquet
+        - Ref::granules_parquet
+        - --start-idx
+        - Ref::start_idx
+        - --stop-idx
+        - Ref::stop_idx
+        - --bucket
+        - '!Ref Bucket'
+        - --bucket-prefix
+        - Ref::job_id
+        - --publish-bucket
+        - Ref::publish_bucket
+      timeout: 21600
+      compute_environment: ItsLiveCropBulk
+      vcpu: 1
+      memory: 31500
+      secrets:
+        - PUBLISH_ACCESS_KEY_ID
+        - PUBLISH_SECRET_ACCESS_KEY
+    - name: METADATA
+      image: ghcr.io/asfhyp3/itslive-metadata
+      command:
+        - ++plugin
+        - bulk_meta
+        - --granules-parquet
+        - Ref::granules_parquet
+        - --start-idx
+        - Ref::start_idx
+        - --stop-idx
+        - Ref::stop_idx
+        - --bucket
+        - '!Ref Bucket'
+        - --bucket-prefix
+        - Ref::job_id
+        - --publish-bucket
+        - Ref::publish_bucket
+      timeout: 21600
+      compute_environment: ItsLiveMetaBulk
+      vcpu: 1
+      memory: 31500
+      secrets:
+        - PUBLISH_ACCESS_KEY_ID
+        - PUBLISH_SECRET_ACCESS_KEY
diff --git a/job_spec/ITS_LIVE_META_BULK.yml b/job_spec/ITS_LIVE_META_BULK.yml
new file mode 100644
index 000000000..b5ffbc134
--- /dev/null
+++ b/job_spec/ITS_LIVE_META_BULK.yml
@@ -0,0 +1,61 @@
+ITS_LIVE_META_BULK:
+  required_parameters:
+    - granules_parquet
+  parameters:
+    granules_parquet:
+      api_schema:
+        description: S3 URI for a parquet file specifying the existing ITS_LIVE products to generate metadata for
+        type: string
+        pattern: '^s3:\/\/.*\.parquet$'
+    start_idx:
+      api_schema:
+        description: Start index (inclusive) of the contiguous subset of granules in granules_parquet to generate metadata for
+        type: integer
+        minimum: 0
+        default: 0
+    stop_idx:
+      api_schema:
+        description: Stop index (exclusive) of the contiguous subset of granules in granules_parquet to generate metadata for
+        type: integer
+        nullable: true
+        minimum: 1
+        default: null
+    publish_bucket:
+      api_schema:
+        description: Publish the resulting metadata files to the ITS_LIVE AWS Open Data (or test) S3 Bucket
+        type: string
+        nullable: true
+        enum:
+          - null
+          - "its-live-data"
+          - "its-live-data-test"
+        default: null
+  cost_profiles:
+    DEFAULT:
+      cost: 1.0
+  validators: []
+  steps:
+    - name: ''
+      image: ghcr.io/asfhyp3/itslive-metadata
+      command:
+        - ++plugin
+        - bulk_meta
+        - --granules-parquet
+        - Ref::granules_parquet
+        - --start-idx
+        - Ref::start_idx
+        - --stop-idx
+        - Ref::stop_idx
+        - --bucket
+        - '!Ref Bucket'
+        - --bucket-prefix
+        - Ref::job_id
+        - --publish-bucket
+        - Ref::publish_bucket
+      timeout: 21600
+      compute_environment: ItsLiveMetaBulk
+      vcpu: 1
+      memory: 31500
+      secrets:
+        - PUBLISH_ACCESS_KEY_ID
+        - PUBLISH_SECRET_ACCESS_KEY
diff --git a/job_spec/OPERA_DIST_S1.yml b/job_spec/OPERA_DIST_S1.yml
index 62afe01f3..04c8148e1 100644
--- a/job_spec/OPERA_DIST_S1.yml
+++ b/job_spec/OPERA_DIST_S1.yml
@@ -119,7 +119,7 @@ OPERA_DIST_S1:
         type: integer
         default: 7
         minimum: 1
-        maximum: 16
+        maximum: 32
         example: 7
     batch_size_for_norm_param_estimation:
       api_schema:
@@ -140,6 +140,12 @@ OPERA_DIST_S1:
         type: boolean
         default: false
         example: false
+    model_context_length:
+      api_schema:
+        description: Maximum number of baseline images to use. If -1, use the largest allowable baseline.
+        type: integer
+        default: -1
+        example: 10
   cost_profiles:
     DEFAULT:
       cost: 1.0
@@ -186,6 +192,8 @@ OPERA_DIST_S1:
         - Ref::model_compilation
         - --use_date_encoding
         - Ref::use_date_encoding
+        - --model_context_length
+        - Ref::model_context_length
       timeout: 3600 # 1 hr
       compute_environment: DistS1
       vcpu: 1
diff --git a/job_spec/SLIMSAR_TDBP.yml b/job_spec/SLIMSAR_TDBP.yml
new file mode 100644
index 000000000..e1b5f8b3f
--- /dev/null
+++ b/job_spec/SLIMSAR_TDBP.yml
@@ -0,0 +1,73 @@
+SLIMSAR_TDBP:
+  required_parameters:
+    - data_fp
+    - nav_fp
+    - dem_fp
+  parameters:
+    data_fp:
+      api_schema:
+        type: string
+        pattern: '^s3:\/\/[a-z0-9.-]+(\/[^/]+)*\/[^/]+\.sar$'
+        description: Path to uncompressed SlimSAR echo/raw file on an S3 bucket
+        example: s3://asf-bucket/slimsar/SAR_11072025_110035.sar
+    nav_fp:
+      api_schema:
+        type: string
+        pattern: '^s3:\/\/[a-z0-9.-]+(\/[^/]+)*\/[^/]+\.dat$'
+        description: Path to post-processed nav .dat file on an S3 bucket
+        example: s3://asf-bucket/slimsar/NMZT23520001E_100_proj.dat
+    dem_fp:
+      api_schema:
+        type: string
+        pattern: '^s3:\/\/[a-z0-9.-]+(\/[^/]+)*\/[^/]+\.tif$'
+        description: Path to DEM file on an S3 bucket
+        example: s3://asf-bucket/slimsar/repeat_sub.tif
+    pols:
+      api_schema:
+        type: array
+        minItems: 1
+        maxItems: 4
+        example:
+          - VV
+          - VH
+          - HV
+          - HH
+        default: [VV, VH, HV, HH]
+        items:
+          description: Polarization to process
+          maxLength: 2
+          minLength: 2
+          type: string
+          enum:
+            - VV
+            - VH
+            - HV
+            - HH
+  validators: []
+  cost_profiles:
+    DEFAULT:
+      cost: 1.0
+  steps:
+    - name: ''
+      image: 242399506617.dkr.ecr.us-west-2.amazonaws.com/hyp3-slimsar
+      command:
+        - --data_fp
+        - Ref::data_fp
+        - --nav_fp
+        - Ref::nav_fp
+        - --dem_fp
+        - Ref::dem_fp
+        - --pols
+        - Ref::pols
+        - --bucket
+        - '!Ref Bucket'
+        - --bucket_prefix
+        - Ref::job_id
+      timeout: 126000 # 35 hours
+      compute_environment: SlimSAR
+      vcpu: 1
+      gpu: 1
+      memory: 30500
+      secrets:
+        - EARTHDATA_USERNAME
+        - EARTHDATA_PASSWORD
diff --git a/job_spec/config/compute_environments.yml b/job_spec/config/compute_environments.yml
index dfc3460bd..a0a30f7cd 100644
--- a/job_spec/config/compute_environments.yml
+++ b/job_spec/config/compute_environments.yml
@@ -24,6 +24,21 @@ compute_environments:
     instance_types: m6id.2xlarge,m6id.4xlarge,m6id.8xlarge
     allocation_type: EC2
     allocation_strategy: BEST_FIT_PROGRESSIVE
+  ItsLiveCropBulk:
+    instance_types: r7gd.medium,r7gd.large,r7gd.xlarge,r7gd.2xlarge,r7gd.4xlarge,r7gd.8xlarge
+    ami_id: ami-0692356050549192d # /aws/service/ecs/optimized-ami/amazon-linux-2023/arm64/recommended/image_id
+    allocation_type: EC2
+    allocation_strategy: BEST_FIT_PROGRESSIVE
   ItsLiveMeta:
     instance_types: r6id.xlarge,r6id.2xlarge,r6id.4xlarge,r6id.8xlarge,r6idn.xlarge,r6idn.2xlarge,r6idn.4xlarge,r6idn.8xlarge
     ami_id: ami-0aece254fc7c27a77 # /aws/service/ecs/optimized-ami/amazon-linux-2023/recommended/image_id
+  ItsLiveMetaBulk:
+    instance_types: r6id.xlarge,r6id.2xlarge,r6id.4xlarge,r6id.8xlarge,r6idn.xlarge,r6idn.2xlarge,r6idn.4xlarge,r6idn.8xlarge
+    ami_id: ami-0aece254fc7c27a77 # /aws/service/ecs/optimized-ami/amazon-linux-2023/recommended/image_id
+    allocation_type: EC2
+    allocation_strategy: BEST_FIT_PROGRESSIVE
+  SlimSAR:
+    instance_types: g4dn.2xlarge,g4dn.4xlarge,g4dn.8xlarge,g4dn.16xlarge
+    ami_id: ami-03aa99ddf5498ceb9 # /aws/service/ecs/optimized-ami/amazon-linux-2/gpu/recommended/image_id
+    allocation_type: EC2
+    allocation_strategy: BEST_FIT_PROGRESSIVE