diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md new file mode 100644 index 0000000..402b2bf --- /dev/null +++ b/.github/copilot-instructions.md @@ -0,0 +1,112 @@ +# Copilot Instructions for docker-diskmark + +## Project Overview + +Docker DiskMark is a fio-based disk benchmarking tool packaged as a minimal Docker container. It provides CrystalDiskMark-like functionality for Linux systems. + +**Container registries:** +- Docker Hub: `e7db/diskmark` (tags only) +- GHCR: `ghcr.io/e7db/diskmark` (all builds) + +## Project Structure + +``` +diskmark.sh # Main entry point (~70 lines) +lib/ +├── args.sh # CLI argument parsing + help/version +├── validate.sh # Input validation functions +├── utils.sh # Utility functions (color, size conversion, cleanup) +├── profiles.sh # Profile definitions (default, nvme, custom job) +├── detect.sh # Drive/filesystem detection +├── benchmark.sh # fio benchmark execution + warmup + result parsing +├── output.sh # Output formatting (human/JSON/YAML/XML) +└── update.sh # Update check functionality +``` + +## Default Values + +Key defaults (defined in Dockerfile ENV and scripts): +- `TARGET=/disk` - Benchmark directory +- `PROFILE=auto` - Auto-detect drive type +- `IO=direct` - Direct I/O mode +- `DATA=random` - Random data pattern +- `SIZE=1G` - Test file size +- `WARMUP=1` - Warmup enabled +- `RUNTIME=5s` - Runtime per job +- `UPDATE_CHECK=1` - Update check enabled + +## Clean Code Principles + +Follow these clean code principles when contributing: + +### Single Responsibility +- Each function should do one thing and do it well +- Keep functions small and focused (ideally < 30 lines) +- Separate concerns: parsing, validation, execution, output + +### Meaningful Names +- Use descriptive function names: `validate_size_string` not `check` +- Use consistent naming conventions (snake_case for functions/variables) +- Prefix validation functions with `validate_` +- Prefix parsing functions with `parse_` + +### DRY (Don't Repeat Yourself) +- Extract common patterns into reusable functions +- Use helper functions for repeated validation logic +- Centralize error handling and output formatting + +### Comments and Documentation +- Functions should be self-documenting through clear names +- Add comments only when explaining "why", not "what" +- Keep help text and documentation in sync with code + +### Error Handling +- Fail fast with clear error messages +- Validate inputs early before processing +- Use consistent exit codes (0=success, 1=error) + +### Code Organization +- Group related functions together +- Order: constants → helpers → validators → core logic → main +- Keep configuration separate from logic + +## Shell Script Best Practices + +- Use `set -e` to exit on errors +- Quote variables: `"$VAR"` not `$VAR` +- Use `[[` for conditionals (bash) +- Prefer `local` variables in functions +- Use meaningful return codes +- Avoid global state when possible + +## Testing Guidelines + +- All features should have corresponding tests in `.github/workflows/tests.yml` +- Test both valid and invalid inputs +- Test CLI arguments in all formats: `--key value`, `--key=value`, `-k value` +- Use dry-run mode for input validation tests +- Use minimal sizes/runtimes for actual benchmark tests + +## Docker Best Practices + +- Keep the container minimal (scratch-based) +- Only include necessary binaries +- Use multi-stage builds +- Set appropriate defaults via ENV +- Run as non-root user (65534:65534) + +## CI/CD Workflows + +- `tests.yml` - Input validation 
and benchmark tests +- `docker-image.yml` - Build and push to GHCR (always) and Docker Hub (tags only) +- `codeql.yml` - Security scanning + +## Output Formats + +The tool supports multiple output formats: +- Human-readable (default): colored, with emojis +- JSON: structured, machine-readable +- YAML: structured, human-friendly +- XML: structured, enterprise-compatible + +When modifying output, ensure all formats are updated consistently. diff --git a/.github/workflows/docker-image.yml b/.github/workflows/docker-image.yml index 7aa52b8..de4482b 100644 --- a/.github/workflows/docker-image.yml +++ b/.github/workflows/docker-image.yml @@ -17,37 +17,40 @@ permissions: contents: read packages: write pull-requests: write + security-events: write env: PLATFORMS: linux/amd64,linux/arm/v7,linux/arm64/v8,linux/ppc64le,linux/s390x jobs: build: + name: Build and Push Docker Image runs-on: ubuntu-latest env: - HAS_DOCKERHUB_SECRETS: ${{ github.event_name != 'pull_request' || github.repository == github.event.pull_request.head.repo.full_name }} + IS_TAG: ${{ startsWith(github.ref, 'refs/tags/') }} + SHOULD_PUSH: ${{ github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name == github.repository }} steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@53851d14592bedcffcf25ea515637cff71ef929a # v3.4.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@6524bf65af31da8d45b59e8c27de4bd072b392f5 # v3.9.0 - name: Login to Docker Hub - if: ${{ env.HAS_DOCKERHUB_SECRETS }} - uses: docker/login-action@v3 + if: ${{ env.IS_TAG }} + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry - uses: docker/login-action@v3 + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io username: ${{ github.repository_owner }} password: ${{ secrets.GITHUB_TOKEN }} - name: Extract Docker metadata id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@369eb591f429131d6889c46b94e711f089e6ca96 # v5.6.1 with: images: | name=${{ vars.GHCR_IMAGE }} @@ -55,50 +58,100 @@ jobs: labels: | org.opencontainers.image.title=docker-diskmark org.opencontainers.image.description=A disk benchmarking tool for Docker - org.opencontainers.image.revision=${{ env.SHA }} + org.opencontainers.image.revision=${{ github.sha }} tags: | type=semver,pattern={{version}} type=semver,pattern={{major}}.{{minor}} type=semver,pattern={{major}} type=ref,event=pr type=raw,value=latest,enable={{is_default_branch}} + type=sha,format=long,prefix=sha- - name: Determine version id: version run: | if [[ "${{ github.ref }}" == refs/tags/* ]]; then - echo "version=${{ github.ref_name }}" >> $GITHUB_OUTPUT + VERSION="${{ github.ref_name }}" + VERSION="${VERSION#v}" else - echo "version=${{ github.sha }}" >> $GITHUB_OUTPUT + GIT_DESC=$(git describe --tags --always 2>/dev/null) + if [[ "$GIT_DESC" =~ ^v?([0-9]+\.[0-9]+\.[0-9]+)-([0-9]+)-g([a-f0-9]+)$ ]]; then + VERSION="${BASH_REMATCH[1]}-dev.${BASH_REMATCH[2]}+${BASH_REMATCH[3]}" + else + VERSION="0.0.0-dev+${GITHUB_SHA}" + fi fi + echo "version=$VERSION" >> $GITHUB_OUTPUT - name: Build and push Docker image - uses: docker/build-push-action@v5 + uses: 
docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355 # v5.4.0 with: context: . platforms: ${{ env.PLATFORMS }} pull: true cache-from: type=gha cache-to: type=gha - push: true + push: ${{ env.SHOULD_PUSH }} tags: ${{ steps.meta.outputs.tags }} labels: ${{ steps.meta.outputs.labels }} build-args: | VERSION=${{ steps.version.outputs.version }} - - name: Docker Scout - id: docker-scout - if: ${{ github.event_name == 'pull_request' }} - uses: docker/scout-action@v1 - with: - command: cves,compare - image: ${{ steps.meta.outputs.tags }} - to: ${{ vars.GHCR_IMAGE }}:latest - ignore-unchanged: true - only-fixed: true - write-comment: true - github-token: ${{ secrets.GITHUB_TOKEN }} + + update-description: + name: Update DockerHub Description + runs-on: ubuntu-latest + needs: build + if: ${{ github.ref == 'refs/heads/main' }} + steps: - name: Update repo description - if: ${{ github.ref == 'refs/heads/main' && env.HAS_DOCKERHUB_SECRETS }} - uses: peter-evans/dockerhub-description@v4 + uses: peter-evans/dockerhub-description@e98e4d1628a5f3be2be7c231e50981aee98723ae # v4.0.2 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} repository: ${{ vars.DOCKERHUB_IMAGE }} + + scan: + name: Security Scan (${{ matrix.scanner }}) + runs-on: ubuntu-latest + needs: build + strategy: + fail-fast: false + matrix: + scanner: [trivy, grype] + steps: + - name: Checkout + uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.repository_owner }} + password: ${{ secrets.GITHUB_TOKEN }} + + # Trivy + - name: Run Trivy vulnerability scanner + if: ${{ matrix.scanner == 'trivy' }} + uses: aquasecurity/trivy-action@18f2510ee396bbf400402947b394f2dd8c87dbb0 # 0.30.0 + with: + image-ref: ${{ vars.GHCR_IMAGE }}:sha-${{ github.sha }} + format: sarif + output: trivy-results.sarif + severity: CRITICAL,HIGH,MEDIUM + - name: Upload Trivy scan results + if: ${{ matrix.scanner == 'trivy' && always() }} + uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.1 + with: + sarif_file: trivy-results.sarif + + # Grype + - name: Run Grype vulnerability scanner + if: ${{ matrix.scanner == 'grype' }} + id: grype + uses: anchore/scan-action@abae793926ec39a78ab18002bc7fc45bbbd94342 # v6.0.0 + with: + image: ${{ vars.GHCR_IMAGE }}:sha-${{ github.sha }} + fail-build: false + severity-cutoff: medium + - name: Upload Grype scan results + if: ${{ matrix.scanner == 'grype' && always() }} + uses: github/codeql-action/upload-sarif@48ab28a6f5dbc2a99bf1e0131198dd8f1df78169 # v3.28.1 + with: + sarif_file: ${{ steps.grype.outputs.sarif }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 3045932..4aab329 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -20,11 +20,11 @@ env: jobs: # ============================================ - # INPUT VALIDATION TESTS (dry-run mode) + # INPUT VALIDATION - ENV VARIABLES (dry-run) # ============================================ - input-validation: - name: Input Validation + validation-env: + name: "Validation: ENV" runs-on: ubuntu-latest steps: - name: Checkout @@ -73,18 +73,14 @@ jobs: - name: Build test image run: docker build -t diskmark:test . 
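(Aside: the validation steps below call `run_valid`, `run_invalid`, and `check_step_result` from `/tmp/test_helpers.sh`; their definitions sit in unchanged lines this hunk does not show. A minimal sketch of what such helpers look like, assuming they mirror the CLI-validation helpers added further down in this same file:)

```bash
# Sketch only — assumed to mirror the run_valid/run_invalid helpers defined
# for the CLI-validation job later in this diff. Relies on $RESULTS_FILE and
# STEP_FAILED being set by the job's "Setup test helpers" step.
run_valid() {
  local name="$1"; shift
  # A valid configuration must exit 0.
  if "$@" > /dev/null 2>&1; then
    echo "pass:$name" >> "$RESULTS_FILE"
  else
    echo "fail:$name" >> "$RESULTS_FILE"; STEP_FAILED=1
  fi
}

run_invalid() {
  local name="$1"; shift
  # An invalid configuration must be rejected with a non-zero exit code.
  if "$@" > /dev/null 2>&1; then
    echo "fail:$name" >> "$RESULTS_FILE"; STEP_FAILED=1
  else
    echo "pass:$name" >> "$RESULTS_FILE"
  fi
}

check_step_result() { exit $STEP_FAILED; }
```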
- # ============================================ - # VALID INPUT TESTS - Should all succeed - # ============================================ - - - name: "Valid: Default configuration" + - name: "Default: configuration" if: always() run: | source /tmp/test_helpers.sh run_valid "Default configuration" docker run --rm -e DRY_RUN=1 diskmark:test check_step_result - - name: "Valid: SIZE variations" + - name: "SIZE: valid values" if: always() run: | source /tmp/test_helpers.sh @@ -95,200 +91,145 @@ jobs: run_valid "SIZE=1P" docker run --rm -e DRY_RUN=1 -e SIZE=1P diskmark:test run_valid "SIZE=100m (lowercase)" docker run --rm -e DRY_RUN=1 -e SIZE=100m diskmark:test run_valid "SIZE=1g (lowercase)" docker run --rm -e DRY_RUN=1 -e SIZE=1g diskmark:test + run_valid "SIZE=(empty, defaults to 1G)" docker run --rm -e DRY_RUN=1 -e SIZE= diskmark:test + run_valid "SIZE=1 (bytes)" docker run --rm -e DRY_RUN=1 -e SIZE=1 diskmark:test + run_valid "SIZE=999P" docker run --rm -e DRY_RUN=1 -e SIZE=999P diskmark:test check_step_result - - name: "Valid: PROFILE options" - if: always() - run: | - source /tmp/test_helpers.sh - run_valid "PROFILE=auto" docker run --rm -e DRY_RUN=1 -e PROFILE=auto diskmark:test - run_valid "PROFILE=default" docker run --rm -e DRY_RUN=1 -e PROFILE=default diskmark:test - run_valid "PROFILE=nvme" docker run --rm -e DRY_RUN=1 -e PROFILE=nvme diskmark:test - check_step_result - - - name: "Valid: IO modes" - if: always() - run: | - source /tmp/test_helpers.sh - run_valid "IO=direct" docker run --rm -e DRY_RUN=1 -e IO=direct diskmark:test - run_valid "IO=buffered" docker run --rm -e DRY_RUN=1 -e IO=buffered diskmark:test - check_step_result - - - name: "Valid: DATA patterns" - if: always() - run: | - source /tmp/test_helpers.sh - run_valid "DATA=random" docker run --rm -e DRY_RUN=1 -e DATA=random diskmark:test - run_valid "DATA=rand" docker run --rm -e DRY_RUN=1 -e DATA=rand diskmark:test - run_valid "DATA=zero" docker run --rm -e DRY_RUN=1 -e DATA=zero diskmark:test - run_valid "DATA=0" docker run --rm -e DRY_RUN=1 -e DATA=0 diskmark:test - run_valid "DATA=0x00" docker run --rm -e DRY_RUN=1 -e DATA=0x00 diskmark:test - check_step_result - - - name: "Valid: WARMUP options" - if: always() - run: | - source /tmp/test_helpers.sh - run_valid "WARMUP=0" docker run --rm -e DRY_RUN=1 -e WARMUP=0 diskmark:test - run_valid "WARMUP=1" docker run --rm -e DRY_RUN=1 -e WARMUP=1 diskmark:test - run_valid "WARMUP=1 WARMUP_SIZE=8M" docker run --rm -e DRY_RUN=1 -e WARMUP=1 -e WARMUP_SIZE=8M diskmark:test - run_valid "WARMUP=1 WARMUP_SIZE=64M" docker run --rm -e DRY_RUN=1 -e WARMUP=1 -e WARMUP_SIZE=64M diskmark:test - run_valid "WARMUP=1 WARMUP_SIZE=128M" docker run --rm -e DRY_RUN=1 -e WARMUP=1 -e WARMUP_SIZE=128M diskmark:test - check_step_result - - - name: "Valid: RUNTIME formats" - if: always() - run: | - source /tmp/test_helpers.sh - run_valid "RUNTIME=500ms" docker run --rm -e DRY_RUN=1 -e RUNTIME=500ms diskmark:test - run_valid "RUNTIME=5s" docker run --rm -e DRY_RUN=1 -e RUNTIME=5s diskmark:test - run_valid "RUNTIME=2m" docker run --rm -e DRY_RUN=1 -e RUNTIME=2m diskmark:test - run_valid "RUNTIME=1h" docker run --rm -e DRY_RUN=1 -e RUNTIME=1h diskmark:test - check_step_result - - - name: "Valid: LOOPS option" - if: always() - run: | - source /tmp/test_helpers.sh - run_valid "LOOPS=1" docker run --rm -e DRY_RUN=1 -e LOOPS=1 diskmark:test - run_valid "LOOPS=5" docker run --rm -e DRY_RUN=1 -e LOOPS=5 diskmark:test - run_valid "LOOPS=100" docker run --rm -e DRY_RUN=1 -e LOOPS=100 diskmark:test - 
check_step_result - - - name: "Valid: Custom JOB formats" + - name: "SIZE: invalid values" if: always() run: | source /tmp/test_helpers.sh - run_valid "JOB=SEQ1MQ8T1" docker run --rm -e DRY_RUN=1 -e JOB=SEQ1MQ8T1 diskmark:test - run_valid "JOB=SEQ128KQ32T1" docker run --rm -e DRY_RUN=1 -e JOB=SEQ128KQ32T1 diskmark:test - run_valid "JOB=RND4KQ32T16" docker run --rm -e DRY_RUN=1 -e JOB=RND4KQ32T16 diskmark:test - run_valid "JOB=RND4KQ1T1" docker run --rm -e DRY_RUN=1 -e JOB=RND4KQ1T1 diskmark:test - run_valid "JOB=SEQ4MQ1T4" docker run --rm -e DRY_RUN=1 -e JOB=SEQ4MQ1T4 diskmark:test + run_invalid "SIZE=-1G" docker run --rm -e DRY_RUN=1 -e SIZE=-1G diskmark:test + run_invalid "SIZE=0" docker run --rm -e DRY_RUN=1 -e SIZE=0 diskmark:test + run_invalid "SIZE=1X (invalid unit)" docker run --rm -e DRY_RUN=1 -e SIZE=1X diskmark:test + run_invalid "SIZE=abc (non-numeric)" docker run --rm -e DRY_RUN=1 -e SIZE=abc diskmark:test + run_invalid "SIZE=2.5G (float)" docker run --rm -e DRY_RUN=1 -e SIZE=2.5G diskmark:test check_step_result - - name: "Valid: Combined parameters" + - name: "PROFILE: valid values" if: always() run: | source /tmp/test_helpers.sh - run_valid "SIZE+WARMUP+PROFILE+IO+DATA" docker run --rm -e DRY_RUN=1 -e SIZE=2G -e WARMUP=1 -e WARMUP_SIZE=64M -e PROFILE=nvme -e IO=direct -e DATA=random diskmark:test - run_valid "SIZE+LOOPS+DATA+IO" docker run --rm -e DRY_RUN=1 -e SIZE=512M -e LOOPS=3 -e DATA=zero -e IO=buffered diskmark:test + run_valid "PROFILE=auto" docker run --rm -e DRY_RUN=1 -e PROFILE=auto diskmark:test + run_valid "PROFILE=default" docker run --rm -e DRY_RUN=1 -e PROFILE=default diskmark:test + run_valid "PROFILE=nvme" docker run --rm -e DRY_RUN=1 -e PROFILE=nvme diskmark:test check_step_result - - name: "Valid: UPDATE_CHECK options" + - name: "PROFILE: invalid values" if: always() run: | source /tmp/test_helpers.sh - run_valid "UPDATE_CHECK=0" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=0 diskmark:test - run_valid "UPDATE_CHECK=1" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=1 diskmark:test + run_invalid "PROFILE=fast" docker run --rm -e DRY_RUN=1 -e PROFILE=fast diskmark:test + run_invalid "PROFILE=ssd" docker run --rm -e DRY_RUN=1 -e PROFILE=ssd diskmark:test check_step_result - - name: "Valid: FORMAT options" + - name: "TARGET: valid values" if: always() run: | source /tmp/test_helpers.sh - run_valid "FORMAT unset (default)" docker run --rm -e DRY_RUN=1 diskmark:test - run_valid "FORMAT=json" docker run --rm -e DRY_RUN=1 -e FORMAT=json diskmark:test - run_valid "FORMAT=yaml" docker run --rm -e DRY_RUN=1 -e FORMAT=yaml diskmark:test - run_valid "FORMAT=xml" docker run --rm -e DRY_RUN=1 -e FORMAT=xml diskmark:test + run_valid "TARGET=/disk" docker run --rm -e DRY_RUN=1 -e TARGET=/disk diskmark:test + run_valid "TARGET=/tmp (tmpfs)" docker run --rm --tmpfs /tmp -e DRY_RUN=1 -e TARGET=/tmp diskmark:test check_step_result - # ============================================ - # INVALID INPUT TESTS - Should all fail - # ============================================ - - - name: "Invalid: SIZE - negative value" + - name: "TARGET: invalid values" if: always() run: | source /tmp/test_helpers.sh - run_invalid "SIZE=-1G" docker run --rm -e DRY_RUN=1 -e SIZE=-1G diskmark:test + run_invalid "TARGET=/ (root)" docker run --rm -e DRY_RUN=1 -e TARGET=/ diskmark:test + run_invalid "TARGET=/nonexistent/readonly" docker run --rm -e DRY_RUN=1 -e TARGET=/nonexistent/readonly diskmark:test check_step_result - - name: "Invalid: SIZE - zero value" + - name: "IO: valid values" if: always() run: | 
source /tmp/test_helpers.sh - run_invalid "SIZE=0" docker run --rm -e DRY_RUN=1 -e SIZE=0 diskmark:test + run_valid "IO=direct" docker run --rm -e DRY_RUN=1 -e IO=direct diskmark:test + run_valid "IO=buffered" docker run --rm -e DRY_RUN=1 -e IO=buffered diskmark:test check_step_result - - name: "Invalid: SIZE - invalid unit" + - name: "IO: invalid values" if: always() run: | source /tmp/test_helpers.sh - run_invalid "SIZE=1X" docker run --rm -e DRY_RUN=1 -e SIZE=1X diskmark:test + run_invalid "IO=sync" docker run --rm -e DRY_RUN=1 -e IO=sync diskmark:test + run_invalid "IO=async" docker run --rm -e DRY_RUN=1 -e IO=async diskmark:test check_step_result - - name: "Invalid: SIZE - non-numeric" + - name: "DATA: valid values" if: always() run: | source /tmp/test_helpers.sh - run_invalid "SIZE=abc" docker run --rm -e DRY_RUN=1 -e SIZE=abc diskmark:test + run_valid "DATA=random" docker run --rm -e DRY_RUN=1 -e DATA=random diskmark:test + run_valid "DATA=rand" docker run --rm -e DRY_RUN=1 -e DATA=rand diskmark:test + run_valid "DATA=zero" docker run --rm -e DRY_RUN=1 -e DATA=zero diskmark:test + run_valid "DATA=0" docker run --rm -e DRY_RUN=1 -e DATA=0 diskmark:test + run_valid "DATA=0x00" docker run --rm -e DRY_RUN=1 -e DATA=0x00 diskmark:test check_step_result - - name: "Valid: SIZE - empty string (defaults to 1G)" + - name: "DATA: invalid values" if: always() run: | source /tmp/test_helpers.sh - run_valid "SIZE=(empty, defaults to 1G)" docker run --rm -e DRY_RUN=1 -e SIZE= diskmark:test + run_invalid "DATA=ones" docker run --rm -e DRY_RUN=1 -e DATA=ones diskmark:test + run_invalid "DATA=0xFF" docker run --rm -e DRY_RUN=1 -e DATA=0xFF diskmark:test check_step_result - - name: "Invalid: SIZE - float value" + - name: "WARMUP: valid values" if: always() run: | source /tmp/test_helpers.sh - run_invalid "SIZE=2.5G" docker run --rm -e DRY_RUN=1 -e SIZE=2.5G diskmark:test + run_valid "WARMUP=0" docker run --rm -e DRY_RUN=1 -e WARMUP=0 diskmark:test + run_valid "WARMUP=1" docker run --rm -e DRY_RUN=1 -e WARMUP=1 diskmark:test + run_valid "WARMUP=1 WARMUP_SIZE=8M" docker run --rm -e DRY_RUN=1 -e WARMUP=1 -e WARMUP_SIZE=8M diskmark:test + run_valid "WARMUP=1 WARMUP_SIZE=64M" docker run --rm -e DRY_RUN=1 -e WARMUP=1 -e WARMUP_SIZE=64M diskmark:test + run_valid "WARMUP=1 WARMUP_SIZE=128M" docker run --rm -e DRY_RUN=1 -e WARMUP=1 -e WARMUP_SIZE=128M diskmark:test check_step_result - - name: "Invalid: WARMUP - not 0 or 1" + - name: "WARMUP: invalid values" if: always() run: | source /tmp/test_helpers.sh run_invalid "WARMUP=2" docker run --rm -e DRY_RUN=1 -e WARMUP=2 diskmark:test run_invalid "WARMUP=yes" docker run --rm -e DRY_RUN=1 -e WARMUP=yes diskmark:test run_invalid "WARMUP=-1" docker run --rm -e DRY_RUN=1 -e WARMUP=-1 diskmark:test - check_step_result - - - name: "Invalid: WARMUP_SIZE - invalid format" - if: always() - run: | - source /tmp/test_helpers.sh run_invalid "WARMUP_SIZE=invalid" docker run --rm -e DRY_RUN=1 -e WARMUP=1 -e WARMUP_SIZE=invalid diskmark:test run_invalid "WARMUP_SIZE=-8M" docker run --rm -e DRY_RUN=1 -e WARMUP=1 -e WARMUP_SIZE=-8M diskmark:test check_step_result - - name: "Invalid: IO - invalid mode" - if: always() - run: | - source /tmp/test_helpers.sh - run_invalid "IO=sync" docker run --rm -e DRY_RUN=1 -e IO=sync diskmark:test - run_invalid "IO=async" docker run --rm -e DRY_RUN=1 -e IO=async diskmark:test - check_step_result - - - name: "Invalid: DATA - invalid pattern" + - name: "RUNTIME: valid values" if: always() run: | source /tmp/test_helpers.sh - run_invalid "DATA=ones" 
docker run --rm -e DRY_RUN=1 -e DATA=ones diskmark:test - run_invalid "DATA=0xFF" docker run --rm -e DRY_RUN=1 -e DATA=0xFF diskmark:test + run_valid "RUNTIME=500ms" docker run --rm -e DRY_RUN=1 -e RUNTIME=500ms diskmark:test + run_valid "RUNTIME=5s" docker run --rm -e DRY_RUN=1 -e RUNTIME=5s diskmark:test + run_valid "RUNTIME=2m" docker run --rm -e DRY_RUN=1 -e RUNTIME=2m diskmark:test + run_valid "RUNTIME=1h" docker run --rm -e DRY_RUN=1 -e RUNTIME=1h diskmark:test + run_valid "RUNTIME=1ms" docker run --rm -e DRY_RUN=1 -e RUNTIME=1ms diskmark:test + run_valid "RUNTIME=999h" docker run --rm -e DRY_RUN=1 -e RUNTIME=999h diskmark:test check_step_result - - name: "Invalid: PROFILE - invalid value" + - name: "RUNTIME: invalid values" if: always() run: | source /tmp/test_helpers.sh - run_invalid "PROFILE=fast" docker run --rm -e DRY_RUN=1 -e PROFILE=fast diskmark:test - run_invalid "PROFILE=ssd" docker run --rm -e DRY_RUN=1 -e PROFILE=ssd diskmark:test + run_invalid "RUNTIME=5 (no unit)" docker run --rm -e DRY_RUN=1 -e RUNTIME=5 diskmark:test + run_invalid "RUNTIME=5sec" docker run --rm -e DRY_RUN=1 -e RUNTIME=5sec diskmark:test + run_invalid "RUNTIME=-5s" docker run --rm -e DRY_RUN=1 -e RUNTIME=-5s diskmark:test check_step_result - - name: "Invalid: RUNTIME - invalid format" + - name: "LOOPS: valid values" if: always() run: | source /tmp/test_helpers.sh - run_invalid "RUNTIME=5 (no unit)" docker run --rm -e DRY_RUN=1 -e RUNTIME=5 diskmark:test - run_invalid "RUNTIME=5sec" docker run --rm -e DRY_RUN=1 -e RUNTIME=5sec diskmark:test - run_invalid "RUNTIME=-5s" docker run --rm -e DRY_RUN=1 -e RUNTIME=-5s diskmark:test + run_valid "LOOPS=1" docker run --rm -e DRY_RUN=1 -e LOOPS=1 diskmark:test + run_valid "LOOPS=5" docker run --rm -e DRY_RUN=1 -e LOOPS=5 diskmark:test + run_valid "LOOPS=100" docker run --rm -e DRY_RUN=1 -e LOOPS=100 diskmark:test + run_valid "LOOPS=9999" docker run --rm -e DRY_RUN=1 -e LOOPS=9999 diskmark:test + run_valid "LOOPS+RUNTIME (hybrid mode)" docker run --rm -e DRY_RUN=1 -e LOOPS=5 -e RUNTIME=10s diskmark:test check_step_result - - name: "Invalid: LOOPS - not a positive integer" + - name: "LOOPS: invalid values" if: always() run: | source /tmp/test_helpers.sh @@ -297,14 +238,18 @@ jobs: run_invalid "LOOPS=abc" docker run --rm -e DRY_RUN=1 -e LOOPS=abc diskmark:test check_step_result - - name: "Valid: LOOPS and RUNTIME together" + - name: "JOB: valid values" if: always() run: | source /tmp/test_helpers.sh - run_valid "LOOPS+RUNTIME (hybrid mode)" docker run --rm -e DRY_RUN=1 -e LOOPS=5 -e RUNTIME=10s diskmark:test + run_valid "JOB=SEQ1MQ8T1" docker run --rm -e DRY_RUN=1 -e JOB=SEQ1MQ8T1 diskmark:test + run_valid "JOB=SEQ128KQ32T1" docker run --rm -e DRY_RUN=1 -e JOB=SEQ128KQ32T1 diskmark:test + run_valid "JOB=RND4KQ32T16" docker run --rm -e DRY_RUN=1 -e JOB=RND4KQ32T16 diskmark:test + run_valid "JOB=RND4KQ1T1" docker run --rm -e DRY_RUN=1 -e JOB=RND4KQ1T1 diskmark:test + run_valid "JOB=SEQ4MQ1T4" docker run --rm -e DRY_RUN=1 -e JOB=SEQ4MQ1T4 diskmark:test check_step_result - - name: "Invalid: JOB - malformed job name" + - name: "JOB: invalid values" if: always() run: | source /tmp/test_helpers.sh @@ -314,23 +259,17 @@ jobs: run_invalid "JOB=XXX4KQ32T1 (invalid prefix)" docker run --rm -e DRY_RUN=1 -e JOB=XXX4KQ32T1 diskmark:test check_step_result - - name: "Invalid: DRY_RUN - not 0 or 1" - if: always() - run: | - source /tmp/test_helpers.sh - run_invalid "DRY_RUN=yes" docker run --rm -e DRY_RUN=yes diskmark:test - check_step_result - - - name: "Invalid: UPDATE_CHECK - not 
0 or 1" + - name: "FORMAT: valid values" if: always() run: | source /tmp/test_helpers.sh - run_invalid "UPDATE_CHECK=2" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=2 diskmark:test - run_invalid "UPDATE_CHECK=yes" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=yes diskmark:test - run_invalid "UPDATE_CHECK=-1" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=-1 diskmark:test + run_valid "FORMAT unset (default)" docker run --rm -e DRY_RUN=1 diskmark:test + run_valid "FORMAT=json" docker run --rm -e DRY_RUN=1 -e FORMAT=json diskmark:test + run_valid "FORMAT=yaml" docker run --rm -e DRY_RUN=1 -e FORMAT=yaml diskmark:test + run_valid "FORMAT=xml" docker run --rm -e DRY_RUN=1 -e FORMAT=xml diskmark:test check_step_result - - name: "Invalid: FORMAT - unsupported format" + - name: "FORMAT: invalid values" if: always() run: | source /tmp/test_helpers.sh @@ -340,93 +279,71 @@ jobs: run_invalid "FORMAT=TEXT" docker run --rm -e DRY_RUN=1 -e FORMAT=TEXT diskmark:test check_step_result - # ============================================ - # EDGE CASE TESTS - # ============================================ - - - name: "Edge: Very large SIZE value" - if: always() - run: | - source /tmp/test_helpers.sh - run_valid "SIZE=999P" docker run --rm -e DRY_RUN=1 -e SIZE=999P diskmark:test - check_step_result - - - name: "Edge: Minimum valid SIZE" + - name: "UPDATE_CHECK: valid values" if: always() run: | source /tmp/test_helpers.sh - run_valid "SIZE=1 (bytes)" docker run --rm -e DRY_RUN=1 -e SIZE=1 diskmark:test + run_valid "UPDATE_CHECK=0" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=0 diskmark:test + run_valid "UPDATE_CHECK=1" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=1 diskmark:test check_step_result - - name: "Edge: Very short RUNTIME" + - name: "UPDATE_CHECK: invalid values" if: always() run: | source /tmp/test_helpers.sh - run_valid "RUNTIME=1ms" docker run --rm -e DRY_RUN=1 -e RUNTIME=1ms diskmark:test + run_invalid "UPDATE_CHECK=2" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=2 diskmark:test + run_invalid "UPDATE_CHECK=yes" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=yes diskmark:test + run_invalid "UPDATE_CHECK=-1" docker run --rm -e DRY_RUN=1 -e UPDATE_CHECK=-1 diskmark:test check_step_result - - name: "Edge: Very long RUNTIME" + - name: "DRY_RUN: invalid values" if: always() run: | source /tmp/test_helpers.sh - run_valid "RUNTIME=999h" docker run --rm -e DRY_RUN=1 -e RUNTIME=999h diskmark:test + run_invalid "DRY_RUN=yes" docker run --rm -e DRY_RUN=yes diskmark:test check_step_result - - name: "Edge: High LOOPS value" + - name: "Combined: valid parameters" if: always() run: | source /tmp/test_helpers.sh - run_valid "LOOPS=9999" docker run --rm -e DRY_RUN=1 -e LOOPS=9999 diskmark:test + run_valid "SIZE+WARMUP+PROFILE+IO+DATA" docker run --rm -e DRY_RUN=1 -e SIZE=2G -e WARMUP=1 -e WARMUP_SIZE=64M -e PROFILE=nvme -e IO=direct -e DATA=random diskmark:test + run_valid "SIZE+LOOPS+DATA+IO" docker run --rm -e DRY_RUN=1 -e SIZE=512M -e LOOPS=3 -e DATA=zero -e IO=buffered diskmark:test check_step_result - # ============================================ - # VALIDATION SUMMARY - # ============================================ - - name: Test Summary if: always() run: | echo "" echo "============================================" - echo " INPUT VALIDATION TEST SUMMARY" + echo " ENV VALIDATION TEST SUMMARY" echo "============================================" echo "" - - PASS_COUNT=0 - FAIL_COUNT=0 - if [ -f "$RESULTS_FILE" ]; then - PASS_COUNT=$(grep -c "^pass:" "$RESULTS_FILE" || true) - FAIL_COUNT=$(grep -c "^fail:" 
"$RESULTS_FILE" || true) - fi + PASS_COUNT=$(grep -c "^pass:" "$RESULTS_FILE" 2>/dev/null || echo 0) + FAIL_COUNT=$(grep -c "^fail:" "$RESULTS_FILE" 2>/dev/null || echo 0) TOTAL=$((PASS_COUNT + FAIL_COUNT)) - echo "✅ Passed: $PASS_COUNT" echo "❌ Failed: $FAIL_COUNT" echo "━━━━━━━━━━━━━━━━━━━━━━━━" echo " Total: $TOTAL" echo "" - if [ "$FAIL_COUNT" -gt 0 ]; then echo "Failed tests:" grep "^fail:" "$RESULTS_FILE" | cut -d: -f2- | while read -r test; do echo " ❌ $test" done - echo "" exit 1 else echo "All tests passed! 🎉" fi # ============================================ - # REAL BENCHMARK TESTS (non dry-run) + # INPUT VALIDATION - CLI ARGUMENTS (dry-run) # ============================================ - benchmark: - name: Benchmark Tests + validation-cli: + name: "Validation: CLI" runs-on: ubuntu-latest - needs: input-validation - env: - RESULTS_FILE: /tmp/benchmark_results.txt steps: - name: Checkout uses: actions/checkout@v4 @@ -439,19 +356,10 @@ jobs: RESET='\033[0m' STEP_FAILED=0 - # run_simple: Uses UPDATE_CHECK=0, JOB=SEQ1MQ1T1, FORMAT=json for fast simple tests - run_simple() { + run_valid() { local name="$1" shift - local cmd=("$@") - local new_cmd=() - for arg in "${cmd[@]}"; do - new_cmd+=("$arg") - if [[ "$arg" == "--rm" ]]; then - new_cmd+=("-e" "UPDATE_CHECK=0" "-e" "JOB=SEQ1MQ1T1" "-e" "FORMAT=json") - fi - done - if "${new_cmd[@]}"; then + if "$@" > /dev/null 2>&1; then echo -e "${GREEN}✓ Passed${RESET}: $name" echo "pass:$name" >> "$RESULTS_FILE" else @@ -461,25 +369,16 @@ jobs: fi } - # run_json: Uses UPDATE_CHECK=0, FORMAT=json (runs all jobs but outputs JSON) - run_json() { + run_invalid() { local name="$1" shift - local cmd=("$@") - local new_cmd=() - for arg in "${cmd[@]}"; do - new_cmd+=("$arg") - if [[ "$arg" == "--rm" ]]; then - new_cmd+=("-e" "UPDATE_CHECK=0" "-e" "FORMAT=json") - fi - done - if "${new_cmd[@]}"; then - echo -e "${GREEN}✓ Passed${RESET}: $name" - echo "pass:$name" >> "$RESULTS_FILE" - else - echo -e "${RED}✗ Failed${RESET}: $name" + if "$@" > /dev/null 2>&1; then + echo -e "${RED}✗ Failed (should have been rejected)${RESET}: $name" echo "fail:$name" >> "$RESULTS_FILE" STEP_FAILED=1 + else + echo -e "${GREEN}✓ Passed (correctly rejected)${RESET}: $name" + echo "pass:$name" >> "$RESULTS_FILE" fi } @@ -492,408 +391,598 @@ jobs: - name: Build test image run: docker build -t diskmark:test . 
- # ---------- PROFILE variations ---------- - - name: "PROFILE: auto" + - name: "Help/Version: options" if: always() run: | source /tmp/test_helpers.sh - run_json "PROFILE=auto" docker run --rm -e SIZE=16M -e RUNTIME=1s -e PROFILE=auto diskmark:test + run_valid "--help" docker run --rm diskmark:test --help + run_valid "-h" docker run --rm diskmark:test -h + run_valid "--version" docker run --rm diskmark:test --version + run_valid "-v" docker run --rm diskmark:test -v check_step_result - - name: "PROFILE: default" + - name: "SIZE: long options" if: always() run: | source /tmp/test_helpers.sh - run_json "PROFILE=default" docker run --rm -e SIZE=16M -e RUNTIME=1s -e PROFILE=default diskmark:test + run_valid "--size 1G --dry-run" docker run --rm diskmark:test --size 1G --dry-run + run_valid "--size=512M --dry-run" docker run --rm diskmark:test --size=512M --dry-run check_step_result - - name: "PROFILE: nvme" + - name: "SIZE: short options" if: always() run: | source /tmp/test_helpers.sh - run_json "PROFILE=nvme" docker run --rm -e SIZE=16M -e RUNTIME=1s -e PROFILE=nvme diskmark:test + run_valid "-s 1G -n" docker run --rm diskmark:test -s 1G -n check_step_result - # ---------- IO variations ---------- - - name: "IO: direct" + - name: "PROFILE: long options" if: always() run: | source /tmp/test_helpers.sh - run_simple "IO=direct" docker run --rm -e SIZE=16M -e RUNTIME=1s -e IO=direct diskmark:test + run_valid "--profile nvme --dry-run" docker run --rm diskmark:test --profile nvme --dry-run + run_valid "--profile=default --dry-run" docker run --rm diskmark:test --profile=default --dry-run check_step_result - - name: "IO: buffered" + - name: "PROFILE: short options" if: always() run: | source /tmp/test_helpers.sh - run_simple "IO=buffered" docker run --rm -e SIZE=16M -e RUNTIME=1s -e IO=buffered diskmark:test + run_valid "-p nvme -n" docker run --rm diskmark:test -p nvme -n check_step_result - # ---------- DATA variations ---------- - - name: "DATA: random" + - name: "TARGET: long options" if: always() run: | source /tmp/test_helpers.sh - run_simple "DATA=random" docker run --rm -e SIZE=16M -e RUNTIME=1s -e DATA=random diskmark:test + run_valid "--target /disk --dry-run" docker run --rm diskmark:test --target /disk --dry-run + run_valid "--target=/disk --dry-run" docker run --rm diskmark:test --target=/disk --dry-run check_step_result - - name: "DATA: rand" + - name: "TARGET: short options" if: always() run: | source /tmp/test_helpers.sh - run_simple "DATA=rand" docker run --rm -e SIZE=16M -e RUNTIME=1s -e DATA=rand diskmark:test + run_valid "-t /disk -n" docker run --rm diskmark:test -t /disk -n check_step_result - - name: "DATA: zero" + - name: "IO: long options" if: always() run: | source /tmp/test_helpers.sh - run_simple "DATA=zero" docker run --rm -e SIZE=16M -e RUNTIME=1s -e DATA=zero diskmark:test + run_valid "--io buffered --dry-run" docker run --rm diskmark:test --io buffered --dry-run + run_valid "--io=direct --dry-run" docker run --rm diskmark:test --io=direct --dry-run check_step_result - - name: "DATA: 0" + - name: "IO: short options" if: always() run: | source /tmp/test_helpers.sh - run_simple "DATA=0" docker run --rm -e SIZE=16M -e RUNTIME=1s -e DATA=0 diskmark:test + run_valid "-i buffered -n" docker run --rm diskmark:test -i buffered -n check_step_result - - name: "DATA: 0x00" + - name: "DATA: long options" if: always() run: | source /tmp/test_helpers.sh - run_simple "DATA=0x00" docker run --rm -e SIZE=16M -e RUNTIME=1s -e DATA=0x00 diskmark:test + run_valid "--data zero 
--dry-run" docker run --rm diskmark:test --data zero --dry-run + run_valid "--data=random --dry-run" docker run --rm diskmark:test --data=random --dry-run check_step_result - # ---------- SIZE variations ---------- - - name: "SIZE: bytes only" + - name: "DATA: short options" if: always() run: | source /tmp/test_helpers.sh - run_simple "SIZE=1048576 (bytes)" docker run --rm -e SIZE=1048576 -e RUNTIME=1s diskmark:test + run_valid "-d zero -n" docker run --rm diskmark:test -d zero -n check_step_result - - name: "SIZE: kilobytes" + - name: "WARMUP: flag options" if: always() run: | source /tmp/test_helpers.sh - run_simple "SIZE=512K" docker run --rm -e SIZE=512K -e RUNTIME=1s diskmark:test + run_valid "--warmup --dry-run" docker run --rm diskmark:test --warmup --dry-run + run_valid "-w --dry-run" docker run --rm diskmark:test -w --dry-run + run_valid "--no-warmup --dry-run" docker run --rm diskmark:test --no-warmup --dry-run + run_valid "--warmup-size 64M --dry-run" docker run --rm diskmark:test --warmup-size 64M --dry-run + run_valid "--warmup-size=128M --dry-run" docker run --rm diskmark:test --warmup-size=128M --dry-run check_step_result - - name: "SIZE: megabytes" + - name: "RUNTIME: long options" if: always() run: | source /tmp/test_helpers.sh - run_simple "SIZE=16M" docker run --rm -e SIZE=16M -e RUNTIME=1s diskmark:test + run_valid "--runtime 5s --dry-run" docker run --rm diskmark:test --runtime 5s --dry-run + run_valid "--runtime=500ms --dry-run" docker run --rm diskmark:test --runtime=500ms --dry-run check_step_result - - name: "SIZE: lowercase unit" + - name: "RUNTIME: short options" if: always() run: | source /tmp/test_helpers.sh - run_simple "SIZE=16m (lowercase)" docker run --rm -e SIZE=16m -e RUNTIME=1s diskmark:test + run_valid "-r 5s -n" docker run --rm diskmark:test -r 5s -n check_step_result - # ---------- WARMUP variations ---------- - - name: "WARMUP: disabled" + - name: "LOOPS: long options" if: always() run: | source /tmp/test_helpers.sh - run_simple "WARMUP=0" docker run --rm -e SIZE=16M -e RUNTIME=1s -e WARMUP=0 diskmark:test + run_valid "--loops 3 --dry-run" docker run --rm diskmark:test --loops 3 --dry-run + run_valid "--loops=5 --dry-run" docker run --rm diskmark:test --loops=5 --dry-run check_step_result - - name: "WARMUP: enabled (default block)" + - name: "LOOPS: short options" if: always() run: | source /tmp/test_helpers.sh - run_simple "WARMUP=1" docker run --rm -e SIZE=16M -e RUNTIME=1s -e WARMUP=1 diskmark:test + run_valid "-l 3 -n" docker run --rm diskmark:test -l 3 -n check_step_result - - name: "WARMUP: with custom WARMUP_SIZE" + - name: "JOB: long options" if: always() run: | source /tmp/test_helpers.sh - run_simple "WARMUP=1 WARMUP_SIZE=4M" docker run --rm -e SIZE=16M -e RUNTIME=1s -e WARMUP=1 -e WARMUP_SIZE=4M diskmark:test + run_valid "--job SEQ1MQ1T1 --dry-run" docker run --rm diskmark:test --job SEQ1MQ1T1 --dry-run + run_valid "--job=RND4KQ32T1 --dry-run" docker run --rm diskmark:test --job=RND4KQ32T1 --dry-run check_step_result - # ---------- RUNTIME variations ---------- - - name: "RUNTIME: milliseconds" + - name: "JOB: short options" if: always() run: | source /tmp/test_helpers.sh - run_simple "RUNTIME=500ms" docker run --rm -e SIZE=16M -e RUNTIME=500ms diskmark:test + run_valid "-j SEQ1MQ1T1 -n" docker run --rm diskmark:test -j SEQ1MQ1T1 -n check_step_result - - name: "RUNTIME: seconds" + - name: "FORMAT: long options" if: always() run: | source /tmp/test_helpers.sh - run_simple "RUNTIME=1s" docker run --rm -e SIZE=16M -e RUNTIME=1s diskmark:test 
+ run_valid "--format json --dry-run" docker run --rm diskmark:test --format json --dry-run + run_valid "--format=yaml --dry-run" docker run --rm diskmark:test --format=yaml --dry-run check_step_result - # ---------- LOOPS variations ---------- - - name: "LOOPS: single loop" + - name: "FORMAT: short options" if: always() run: | source /tmp/test_helpers.sh - run_simple "LOOPS=1" docker run --rm -e SIZE=16M -e LOOPS=1 diskmark:test + run_valid "-f json -n" docker run --rm diskmark:test -f json -n check_step_result - - name: "LOOPS: multiple loops" + - name: "UPDATE_CHECK: flag options" if: always() run: | source /tmp/test_helpers.sh - run_simple "LOOPS=2" docker run --rm -e SIZE=16M -e LOOPS=2 diskmark:test + run_valid "--no-update-check --dry-run" docker run --rm diskmark:test --no-update-check --dry-run + run_valid "-u --dry-run" docker run --rm diskmark:test -u --dry-run check_step_result - # ---------- JOB variations ---------- - - name: "JOB: sequential kilobytes" + - name: "Display: flag options" if: always() run: | source /tmp/test_helpers.sh - run_json "JOB=SEQ128KQ1T1" docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=SEQ128KQ1T1 diskmark:test + run_valid "--color --dry-run" docker run --rm diskmark:test --color --dry-run + run_valid "--no-color --dry-run" docker run --rm diskmark:test --no-color --dry-run + run_valid "--emoji --dry-run" docker run --rm diskmark:test --emoji --dry-run + run_valid "--no-emoji --dry-run" docker run --rm diskmark:test --no-emoji --dry-run check_step_result - - name: "JOB: sequential megabytes" + - name: "Combined: mixed options" if: always() run: | source /tmp/test_helpers.sh - run_json "JOB=SEQ1MQ8T1" docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=SEQ1MQ8T1 diskmark:test + run_valid "Multiple short options" docker run --rm diskmark:test -s 1G -p nvme -w -r 5s -n + run_valid "Mixed long and short" docker run --rm diskmark:test --size 1G -p nvme --warmup -n + run_valid "All with equals" docker run --rm diskmark:test --size=1G --profile=nvme --runtime=5s --dry-run check_step_result - - name: "JOB: random access" + - name: "Override: CLI overrides ENV" if: always() run: | source /tmp/test_helpers.sh - run_json "JOB=RND4KQ1T1" docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=RND4KQ1T1 diskmark:test + run_valid "CLI --size overrides env SIZE" docker run --rm -e SIZE=100G diskmark:test --size 1G --dry-run + run_valid "CLI --profile overrides env PROFILE" docker run --rm -e PROFILE=default diskmark:test --profile nvme --dry-run + run_valid "CLI --target overrides env TARGET" docker run --rm -e TARGET=/tmp diskmark:test --target /disk --dry-run check_step_result - - name: "JOB: high queue depth" + - name: "Invalid: rejected options" if: always() run: | source /tmp/test_helpers.sh - run_json "JOB=RND4KQ32T1" docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=RND4KQ32T1 diskmark:test + run_invalid "Unknown option --invalid" docker run --rm diskmark:test --invalid + run_invalid "Unknown short option -x" docker run --rm diskmark:test -x + run_invalid "Missing value for --size" docker run --rm diskmark:test --size + run_invalid "Missing value for -s" docker run --rm diskmark:test -s check_step_result - - name: "JOB: multiple threads" + - name: Test Summary if: always() run: | - source /tmp/test_helpers.sh - run_json "JOB=RND4KQ1T4" docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=RND4KQ1T4 diskmark:test - check_step_result + echo "" + echo "============================================" + echo " CLI VALIDATION TEST SUMMARY" + echo 
"============================================" + echo "" + PASS_COUNT=$(grep -c "^pass:" "$RESULTS_FILE" 2>/dev/null || echo 0) + FAIL_COUNT=$(grep -c "^fail:" "$RESULTS_FILE" 2>/dev/null || echo 0) + TOTAL=$((PASS_COUNT + FAIL_COUNT)) + echo "✅ Passed: $PASS_COUNT" + echo "❌ Failed: $FAIL_COUNT" + echo "━━━━━━━━━━━━━━━━━━━━━━━━" + echo " Total: $TOTAL" + echo "" + if [ "$FAIL_COUNT" -gt 0 ]; then + echo "Failed tests:" + grep "^fail:" "$RESULTS_FILE" | cut -d: -f2- | while read -r test; do + echo " ❌ $test" + done + exit 1 + else + echo "All tests passed! 🎉" + fi - # ---------- COMBINED PARAMETERS ---------- - - name: "Combo: NVMe profile + warmup + random data" - if: always() + # ============================================ + # BENCHMARK TESTS (actual execution, concurrent) + # ============================================ + + benchmark-profiles: + name: "Benchmark: Profiles" + runs-on: ubuntu-latest + needs: [validation-env, validation-cli] + env: + RESULTS_FILE: /tmp/benchmark_results.txt + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup test helpers run: | - source /tmp/test_helpers.sh - run_json "NVMe+warmup+random" docker run --rm -e SIZE=16M -e RUNTIME=1s -e PROFILE=nvme -e WARMUP=1 -e DATA=random diskmark:test - check_step_result + cat > /tmp/test_helpers.sh << 'EOF' + GREEN='\033[32m' + RED='\033[31m' + RESET='\033[0m' + STEP_FAILED=0 - - name: "Combo: Default profile + zero data + loops" - if: always() + run_profile() { + local name="$1" + shift + local cmd=("$@") + local new_cmd=() + for arg in "${cmd[@]}"; do + new_cmd+=("$arg") + if [[ "$arg" == "--rm" ]]; then + new_cmd+=("-e" "UPDATE_CHECK=0" "-e" "FORMAT=json" "-e" "RUNTIME=500ms") + fi + done + if "${new_cmd[@]}" > /dev/null 2>&1; then + echo -e "${GREEN}✓ Passed${RESET}: $name" + echo "pass:$name" >> "$RESULTS_FILE" + else + echo -e "${RED}✗ Failed${RESET}: $name" + echo "fail:$name" >> "$RESULTS_FILE" + STEP_FAILED=1 + fi + } + + check_step_result() { + exit $STEP_FAILED + } + EOF + touch "$RESULTS_FILE" + + - name: Build test image + run: docker build -t diskmark:test . 
+ + - name: "PROFILE: auto" run: | source /tmp/test_helpers.sh - run_json "default+zero+loops" docker run --rm -e SIZE=16M -e LOOPS=1 -e PROFILE=default -e DATA=zero diskmark:test + run_profile "PROFILE=auto" docker run --rm -e SIZE=8M -e PROFILE=auto diskmark:test check_step_result - - name: "Combo: Buffered IO + warmup + custom block size" - if: always() + - name: "PROFILE: default" run: | source /tmp/test_helpers.sh - run_simple "buffered+warmup+custom" docker run --rm -e SIZE=16M -e RUNTIME=1s -e IO=buffered -e WARMUP=1 -e WARMUP_SIZE=2M diskmark:test + run_profile "PROFILE=default" docker run --rm -e SIZE=8M -e PROFILE=default diskmark:test check_step_result - - name: "Combo: Custom job + warmup + zero data" - if: always() + - name: "PROFILE: nvme" run: | source /tmp/test_helpers.sh - run_json "job+warmup+zero" docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=SEQ1MQ1T1 -e WARMUP=1 -e DATA=zero diskmark:test + run_profile "PROFILE=nvme" docker run --rm -e SIZE=8M -e PROFILE=nvme diskmark:test check_step_result - # ---------- EDGE CASES ---------- - - name: "Edge: Cross-profile (NVMe profile with buffered IO)" + - name: Test Summary if: always() run: | - source /tmp/test_helpers.sh - run_json "NVMe+buffered" docker run --rm -e SIZE=16M -e RUNTIME=1s -e PROFILE=nvme -e IO=buffered diskmark:test - check_step_result + echo "" + echo "============================================" + echo " PROFILE BENCHMARK SUMMARY" + echo "============================================" + PASS_COUNT=$(grep -c "^pass:" "$RESULTS_FILE" 2>/dev/null || echo 0) + FAIL_COUNT=$(grep -c "^fail:" "$RESULTS_FILE" 2>/dev/null || echo 0) + echo "✅ Passed: $PASS_COUNT | ❌ Failed: $FAIL_COUNT" + [ "$FAIL_COUNT" -gt 0 ] && exit 1 || echo "All tests passed! 🎉" - - name: "Edge: Mixed data and IO modes" - if: always() + benchmark-params: + name: "Benchmark: Parameters" + runs-on: ubuntu-latest + needs: [validation-env, validation-cli] + env: + RESULTS_FILE: /tmp/benchmark_results.txt + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup test helpers + run: | + cat > /tmp/test_helpers.sh << 'EOF' + GREEN='\033[32m' + RED='\033[31m' + RESET='\033[0m' + STEP_FAILED=0 + + run_fast() { + local name="$1" + shift + local cmd=("$@") + local new_cmd=() + for arg in "${cmd[@]}"; do + new_cmd+=("$arg") + if [[ "$arg" == "--rm" ]]; then + new_cmd+=("-e" "UPDATE_CHECK=0" "-e" "JOB=SEQ1MQ1T1" "-e" "FORMAT=json" "-e" "RUNTIME=100ms") + fi + done + if "${new_cmd[@]}" > /dev/null 2>&1; then + echo -e "${GREEN}✓ Passed${RESET}: $name" + echo "pass:$name" >> "$RESULTS_FILE" + else + echo -e "${RED}✗ Failed${RESET}: $name" + echo "fail:$name" >> "$RESULTS_FILE" + STEP_FAILED=1 + fi + } + + check_step_result() { + exit $STEP_FAILED + } + EOF + touch "$RESULTS_FILE" + + - name: Build test image + run: docker build -t diskmark:test . 
+ + - name: "IO: modes" run: | source /tmp/test_helpers.sh - run_simple "0x00+buffered+warmup" docker run --rm -e SIZE=16M -e RUNTIME=1s -e WARMUP=1 -e DATA=0x00 -e IO=buffered diskmark:test + run_fast "IO=direct" docker run --rm -e SIZE=8M -e IO=direct diskmark:test + run_fast "IO=buffered" docker run --rm -e SIZE=8M -e IO=buffered diskmark:test check_step_result - - name: "Edge: Custom job with PROFILE set (job takes precedence)" - if: always() + - name: "DATA: patterns" run: | source /tmp/test_helpers.sh - run_json "job+profile" docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=RND4KQ32T1 -e PROFILE=nvme diskmark:test + run_fast "DATA=random" docker run --rm -e SIZE=8M -e DATA=random diskmark:test + run_fast "DATA=zero" docker run --rm -e SIZE=8M -e DATA=zero diskmark:test + run_fast "DATA=0x00" docker run --rm -e SIZE=8M -e DATA=0x00 diskmark:test check_step_result - - name: "Edge: Small warmup block size" - if: always() + - name: "SIZE: formats" run: | source /tmp/test_helpers.sh - run_simple "WARMUP_SIZE=1K" docker run --rm -e SIZE=16M -e RUNTIME=1s -e WARMUP=1 -e WARMUP_SIZE=1K diskmark:test + run_fast "SIZE=1048576 (bytes)" docker run --rm -e SIZE=1048576 diskmark:test + run_fast "SIZE=512K" docker run --rm -e SIZE=512K diskmark:test + run_fast "SIZE=8M" docker run --rm -e SIZE=8M diskmark:test + run_fast "SIZE=8m (lowercase)" docker run --rm -e SIZE=8m diskmark:test check_step_result - - name: "Edge: WARMUP_SIZE set with WARMUP=0 (ignored)" - if: always() + - name: "WARMUP: options" run: | source /tmp/test_helpers.sh - run_simple "WARMUP=0+WARMUP_SIZE" docker run --rm -e SIZE=16M -e RUNTIME=1s -e WARMUP=0 -e WARMUP_SIZE=64M diskmark:test + run_fast "WARMUP=0" docker run --rm -e SIZE=8M -e WARMUP=0 diskmark:test + run_fast "WARMUP=1" docker run --rm -e SIZE=8M -e WARMUP=1 diskmark:test + run_fast "WARMUP=1 WARMUP_SIZE=2M" docker run --rm -e SIZE=8M -e WARMUP=1 -e WARMUP_SIZE=2M diskmark:test check_step_result - - name: "Edge: All parameters explicitly set" - if: always() + - name: "RUNTIME: formats" run: | source /tmp/test_helpers.sh - run_json "all params" docker run --rm -e SIZE=16M -e RUNTIME=1s -e PROFILE=default -e IO=direct -e DATA=random -e WARMUP=1 -e WARMUP_SIZE=4M diskmark:test + run_fast "RUNTIME=50ms" docker run --rm -e SIZE=8M -e RUNTIME=50ms diskmark:test + run_fast "RUNTIME=100ms" docker run --rm -e SIZE=8M -e RUNTIME=100ms diskmark:test check_step_result - - name: "Edge: Minimal size and runtime" - if: always() + - name: "LOOPS: counts" run: | source /tmp/test_helpers.sh - run_simple "SIZE=4K RUNTIME=100ms" docker run --rm -e SIZE=4K -e RUNTIME=100ms diskmark:test + run_fast "LOOPS=1" docker run --rm -e SIZE=8M -e LOOPS=1 diskmark:test + run_fast "LOOPS=2" docker run --rm -e SIZE=8M -e LOOPS=2 diskmark:test check_step_result - - name: "Edge: SIZE smaller than WARMUP_SIZE" - if: always() + - name: "JOB: patterns" run: | source /tmp/test_helpers.sh - run_simple "SIZE/dev/null || echo 0) + FAIL_COUNT=$(grep -c "^fail:" "$RESULTS_FILE" 2>/dev/null || echo 0) + echo "✅ Passed: $PASS_COUNT | ❌ Failed: $FAIL_COUNT" + [ "$FAIL_COUNT" -gt 0 ] && exit 1 || echo "All tests passed! 
🎉" + + benchmark-formats: + name: "Benchmark: Formats" + runs-on: ubuntu-latest + needs: [validation-env, validation-cli] + env: + RESULTS_FILE: /tmp/benchmark_results.txt + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup test helpers + run: | + cat > /tmp/test_helpers.sh << 'EOF' + GREEN='\033[32m' + RED='\033[31m' + RESET='\033[0m' + STEP_FAILED=0 + EOF + touch "$RESULTS_FILE" + + - name: Build test image + run: docker build -t diskmark:test . + + - name: Install PyYAML run: pip install pyyaml - - name: "FORMAT: json produces valid JSON" - if: always() + - name: "FORMAT: json" run: | source /tmp/test_helpers.sh - docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=SEQ1MQ1T1 -e FORMAT=json -e UPDATE_CHECK=0 diskmark:test > /tmp/output.json - if python3 -m json.tool /tmp/output.json > /dev/null 2>&1; then + docker run --rm -e SIZE=8M -e RUNTIME=100ms -e JOB=SEQ1MQ1T1 -e FORMAT=json -e UPDATE_CHECK=0 diskmark:test > /tmp/output.json + if python3 -c "import json; json.load(open('/tmp/output.json'))"; then echo -e "${GREEN}✓ Passed${RESET}: FORMAT=json produces valid JSON" - echo "pass:FORMAT=json produces valid JSON" >> "$RESULTS_FILE" + echo "pass:FORMAT=json valid" >> "$RESULTS_FILE" else echo -e "${RED}✗ Failed${RESET}: FORMAT=json produces valid JSON" - echo "fail:FORMAT=json produces valid JSON" >> "$RESULTS_FILE" - STEP_FAILED=1 + echo "fail:FORMAT=json valid" >> "$RESULTS_FILE" + exit 1 fi - check_step_result - - - name: "FORMAT: json contains expected structure" - if: always() - run: | - source /tmp/test_helpers.sh - if python3 -c "import json; data=json.load(open('/tmp/output.json')); assert 'configuration' in data and 'results' in data and len(data['results'])>0 and 'name' in data['results'][0] and 'status' in data['results'][0]" > /dev/null 2>&1; then + if grep -q '"results"' /tmp/output.json && grep -q '"configuration"' /tmp/output.json; then echo -e "${GREEN}✓ Passed${RESET}: FORMAT=json contains expected structure" - echo "pass:FORMAT=json contains expected structure" >> "$RESULTS_FILE" + echo "pass:FORMAT=json structure" >> "$RESULTS_FILE" else - echo -e "${RED}✗ Failed${RESET}: FORMAT=json missing expected structure" - echo "fail:FORMAT=json contains expected structure" >> "$RESULTS_FILE" - STEP_FAILED=1 + echo -e "${RED}✗ Failed${RESET}: FORMAT=json contains expected structure" + echo "fail:FORMAT=json structure" >> "$RESULTS_FILE" + exit 1 fi - check_step_result - - name: "FORMAT: yaml produces valid YAML" - if: always() + - name: "FORMAT: yaml" run: | source /tmp/test_helpers.sh - docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=SEQ1MQ1T1 -e FORMAT=yaml -e UPDATE_CHECK=0 diskmark:test > /tmp/output.yaml - if python3 -c "import yaml; yaml.safe_load(open('/tmp/output.yaml'))" > /dev/null 2>&1; then + docker run --rm -e SIZE=8M -e RUNTIME=100ms -e JOB=SEQ1MQ1T1 -e FORMAT=yaml -e UPDATE_CHECK=0 diskmark:test > /tmp/output.yaml + if python3 -c "import yaml; yaml.safe_load(open('/tmp/output.yaml'))"; then echo -e "${GREEN}✓ Passed${RESET}: FORMAT=yaml produces valid YAML" - echo "pass:FORMAT=yaml produces valid YAML" >> "$RESULTS_FILE" + echo "pass:FORMAT=yaml valid" >> "$RESULTS_FILE" else echo -e "${RED}✗ Failed${RESET}: FORMAT=yaml produces valid YAML" - echo "fail:FORMAT=yaml produces valid YAML" >> "$RESULTS_FILE" - STEP_FAILED=1 + echo "fail:FORMAT=yaml valid" >> "$RESULTS_FILE" + exit 1 fi - check_step_result - - - name: "FORMAT: yaml contains expected structure" - if: always() - run: | - source /tmp/test_helpers.sh - if python3 -c "import yaml; 
data=yaml.safe_load(open('/tmp/output.yaml')); assert 'configuration' in data and 'results' in data and len(data['results'])>0 and 'name' in data['results'][0] and 'status' in data['results'][0]" > /dev/null 2>&1; then + if grep -q 'results:' /tmp/output.yaml && grep -q 'configuration:' /tmp/output.yaml; then echo -e "${GREEN}✓ Passed${RESET}: FORMAT=yaml contains expected structure" - echo "pass:FORMAT=yaml contains expected structure" >> "$RESULTS_FILE" + echo "pass:FORMAT=yaml structure" >> "$RESULTS_FILE" else - echo -e "${RED}✗ Failed${RESET}: FORMAT=yaml missing expected structure" - echo "fail:FORMAT=yaml contains expected structure" >> "$RESULTS_FILE" - STEP_FAILED=1 + echo -e "${RED}✗ Failed${RESET}: FORMAT=yaml contains expected structure" + echo "fail:FORMAT=yaml structure" >> "$RESULTS_FILE" + exit 1 fi - check_step_result - - name: "FORMAT: xml produces valid XML" - if: always() + - name: "FORMAT: xml" run: | source /tmp/test_helpers.sh - docker run --rm -e SIZE=16M -e RUNTIME=1s -e JOB=SEQ1MQ1T1 -e FORMAT=xml -e UPDATE_CHECK=0 diskmark:test > /tmp/output.xml - if python3 -c "import xml.etree.ElementTree as ET; ET.parse('/tmp/output.xml')" > /dev/null 2>&1; then + docker run --rm -e SIZE=8M -e RUNTIME=100ms -e JOB=SEQ1MQ1T1 -e FORMAT=xml -e UPDATE_CHECK=0 diskmark:test > /tmp/output.xml + if python3 -c "import xml.etree.ElementTree as ET; ET.parse('/tmp/output.xml')"; then echo -e "${GREEN}✓ Passed${RESET}: FORMAT=xml produces valid XML" - echo "pass:FORMAT=xml produces valid XML" >> "$RESULTS_FILE" + echo "pass:FORMAT=xml valid" >> "$RESULTS_FILE" else echo -e "${RED}✗ Failed${RESET}: FORMAT=xml produces valid XML" - echo "fail:FORMAT=xml produces valid XML" >> "$RESULTS_FILE" - STEP_FAILED=1 + echo "fail:FORMAT=xml valid" >> "$RESULTS_FILE" + exit 1 fi - check_step_result - - - name: "FORMAT: xml contains expected structure" - if: always() - run: | - source /tmp/test_helpers.sh - if python3 -c "import xml.etree.ElementTree as ET; root=ET.parse('/tmp/output.xml').getroot(); jobs=root.find('results').findall('job'); assert root.find('configuration') is not None and root.find('results') is not None and len(jobs)>0 and jobs[0].get('name') is not None and jobs[0].get('status') is not None" > /dev/null 2>&1; then + if grep -q '<results>' /tmp/output.xml && grep -q '<configuration>' /tmp/output.xml; then echo -e "${GREEN}✓ Passed${RESET}: FORMAT=xml contains expected structure" - echo "pass:FORMAT=xml contains expected structure" >> "$RESULTS_FILE" + echo "pass:FORMAT=xml structure" >> "$RESULTS_FILE" else - echo -e "${RED}✗ Failed${RESET}: FORMAT=xml missing expected structure" - echo "fail:FORMAT=xml contains expected structure" >> "$RESULTS_FILE" - STEP_FAILED=1 + echo -e "${RED}✗ Failed${RESET}: FORMAT=xml contains expected structure" + echo "fail:FORMAT=xml structure" >> "$RESULTS_FILE" + exit 1 fi - check_step_result - - # ============================================ - # BENCHMARK SUMMARY - # ============================================ - name: Test Summary if: always() run: | echo "" echo "============================================" - echo " BENCHMARK TEST SUMMARY" + echo " FORMAT BENCHMARK SUMMARY" echo "============================================" - echo "" + PASS_COUNT=$(grep -c "^pass:" "$RESULTS_FILE" 2>/dev/null || echo 0) + FAIL_COUNT=$(grep -c "^fail:" "$RESULTS_FILE" 2>/dev/null || echo 0) + echo "✅ Passed: $PASS_COUNT | ❌ Failed: $FAIL_COUNT" + [ "$FAIL_COUNT" -gt 0 ] && exit 1 || echo "All tests passed!
🎉" - PASS_COUNT=0 - FAIL_COUNT=0 - if [ -f "$RESULTS_FILE" ]; then - PASS_COUNT=$(grep -c "^pass:" "$RESULTS_FILE" || true) - FAIL_COUNT=$(grep -c "^fail:" "$RESULTS_FILE" || true) - fi - TOTAL=$((PASS_COUNT + FAIL_COUNT)) + benchmark-edge: + name: "Benchmark: Edge Cases" + runs-on: ubuntu-latest + needs: [validation-env, validation-cli] + env: + RESULTS_FILE: /tmp/benchmark_results.txt + steps: + - name: Checkout + uses: actions/checkout@v4 - echo "✅ Passed: $PASS_COUNT" - echo "❌ Failed: $FAIL_COUNT" - echo "━━━━━━━━━━━━━━━━━━━━━━━━" - echo " Total: $TOTAL" - echo "" + - name: Setup test helpers + run: | + cat > /tmp/test_helpers.sh << 'EOF' + GREEN='\033[32m' + RED='\033[31m' + RESET='\033[0m' + STEP_FAILED=0 - if [ "$FAIL_COUNT" -gt 0 ]; then - echo "Failed tests:" - grep "^fail:" "$RESULTS_FILE" | cut -d: -f2- | while read -r test; do - echo " ❌ $test" + run_fast() { + local name="$1" + shift + local cmd=("$@") + local new_cmd=() + for arg in "${cmd[@]}"; do + new_cmd+=("$arg") + if [[ "$arg" == "--rm" ]]; then + new_cmd+=("-e" "UPDATE_CHECK=0" "-e" "JOB=SEQ1MQ1T1" "-e" "FORMAT=json" "-e" "RUNTIME=100ms") + fi done - echo "" - exit 1 - else - echo "All benchmarks passed! 🎉" - fi + if "${new_cmd[@]}" > /dev/null 2>&1; then + echo -e "${GREEN}✓ Passed${RESET}: $name" + echo "pass:$name" >> "$RESULTS_FILE" + else + echo -e "${RED}✗ Failed${RESET}: $name" + echo "fail:$name" >> "$RESULTS_FILE" + STEP_FAILED=1 + fi + } + + check_step_result() { + exit $STEP_FAILED + } + EOF + touch "$RESULTS_FILE" + + - name: Build test image + run: docker build -t diskmark:test . + + - name: "Edge: combined parameters" + run: | + source /tmp/test_helpers.sh + run_fast "buffered+warmup+zero" docker run --rm -e SIZE=8M -e IO=buffered -e WARMUP=1 -e DATA=zero diskmark:test + run_fast "job+warmup+random" docker run --rm -e SIZE=8M -e JOB=SEQ1MQ1T1 -e WARMUP=1 -e DATA=random diskmark:test + run_fast "SIZE/dev/null || echo 0) + FAIL_COUNT=$(grep -c "^fail:" "$RESULTS_FILE" 2>/dev/null || echo 0) + echo "✅ Passed: $PASS_COUNT | ❌ Failed: $FAIL_COUNT" + [ "$FAIL_COUNT" -gt 0 ] && exit 1 || echo "All tests passed! 
🎉" diff --git a/Dockerfile b/Dockerfile index bb3230f..439127c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,6 +16,7 @@ RUN echo "$VERSION" > /etc/diskmark-version FROM scratch COPY --from=builder /diskmark/ / COPY --from=version /etc/diskmark-version /etc/diskmark-version +COPY lib/ /usr/lib/diskmark/ COPY diskmark.sh /usr/bin/diskmark VOLUME /disk WORKDIR /disk diff --git a/README.md b/README.md index ef85f0a..3d47c09 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,50 @@ # docker-diskmark -[![GitHub tag](https://img.shields.io/github/v/tag/e7db/docker-diskmark)](https://github.com/e7db/docker-diskmark/tags) [![codeql](https://github.com/e7d/docker-diskmark/actions/workflows/codeql.yml/badge.svg)](https://github.com/e7d/docker-diskmark/actions/workflows/codeql.yml) [![tests](https://github.com/e7d/docker-diskmark/actions/workflows/tests.yml/badge.svg)](https://github.com/e7d/docker-diskmark/actions/workflows/tests.yml) [![docker-image](https://github.com/e7d/docker-diskmark/actions/workflows/docker-image.yml/badge.svg)](https://github.com/e7d/docker-diskmark/actions/workflows/docker-image.yml) +[![GitHub tag](https://img.shields.io/github/v/tag/e7db/docker-diskmark)](https://github.com/e7db/docker-diskmark/tags) [![codeql](https://github.com/e7db/docker-diskmark/actions/workflows/codeql.yml/badge.svg)](https://github.com/e7db/docker-diskmark/actions/workflows/codeql.yml) [![tests](https://github.com/e7db/docker-diskmark/actions/workflows/tests.yml/badge.svg)](https://github.com/e7db/docker-diskmark/actions/workflows/tests.yml) [![docker-image](https://github.com/e7db/docker-diskmark/actions/workflows/docker-image.yml/badge.svg)](https://github.com/e7db/docker-diskmark/actions/workflows/docker-image.yml) A [fio](https://github.com/axboe/fio)-based disk benchmark [docker container](https://hub.docker.com/r/e7db/diskmark), similar to what [CrystalDiskMark](https://crystalmark.info/en/software/crystaldiskmark/) does. ## Basic Usage -``` +```bash +# From Docker Hub docker pull e7db/diskmark docker run -it --rm e7db/diskmark -``` -![Docker DiskMark](https://github.com/e7d/docker-diskmark/raw/main/assets/diskmark.png?raw=true "Docker DiskMark") +# From GitHub Container Registry +docker pull ghcr.io/e7db/diskmark +docker run -it --rm ghcr.io/e7db/diskmark +``` + +![Docker DiskMark](https://github.com/e7db/docker-diskmark/raw/main/assets/diskmark.png?raw=true "Docker DiskMark") + +## Options + +All options can be configured via CLI arguments (recommended) or environment variables. CLI arguments take precedence. + +The container supports multiple CLI formats: `--key value`, `--key=value`, and `-k value`. + +| Short | Long | Env Var | Default | Description | +| :---: | :--- | :------ | :------ | :---------- | +| `-h` | `--help` | | | Show help message and exit | +| `-v` | `--version` | | | Show version information and exit | +| `-t` | `--target` | `TARGET` | `/disk` | Target directory for benchmark | +| `-p` | `--profile` | `PROFILE` | `auto` | Benchmark profile: `auto`, `default`, `nvme` | +| `-j` | `--job` | `JOB` | | Custom job definition (e.g., `RND4KQ32T16`). Overrides profile. 
| +| `-i` | `--io` | `IO` | `direct` | I/O mode: `direct` (sync), `buffered` (async) | +| `-d` | `--data` | `DATA` | `random` | Data pattern: `random`, `zero` | +| `-s` | `--size` | `SIZE` | `1G` | Test file size (e.g., `500M`, `1G`, `10G`) | +| `-w` | `--warmup` | `WARMUP=1` | `1` | Enable warmup phase | +| | `--no-warmup` | `WARMUP=0` | | Disable warmup phase | +| | `--warmup-size` | `WARMUP_SIZE` | _(profile)_ | Warmup block size (`8M` default, `64M` nvme) | +| `-r` | `--runtime` | `RUNTIME` | `5s` | Runtime per job (e.g., `500ms`, `5s`, `2m`) | +| `-l` | `--loops` | `LOOPS` | | Number of test loops | +| `-n` | `--dry-run` | `DRY_RUN=1` | `0` | Validate configuration without running | +| `-f` | `--format` | `FORMAT` | | Output format: `json`, `yaml`, `xml` | +| `-u` | `--no-update-check` | `UPDATE_CHECK=0` | `1` | Disable update check at startup | +| | `--color` | `COLOR=1` | | Force colored output | +| | `--no-color` | `COLOR=0` | | Disable colored output | +| | `--emoji` | `EMOJI=1` | | Force emoji output | +| | `--no-emoji` | `EMOJI=0` | | Disable emoji output | ## Profiles @@ -26,89 +60,58 @@ The container contains two different test profiles: - Random 4K Q32T16 - Random 4K Q1T1 -## Advanced usage - -Find below a table listing all the different parameters you can use with the container: -| Parameter | Type | Default | Description | -| :- | :- |:- | :- | -| `PROFILE` | Environment | `auto` | The profile to apply:
- `auto` to try and autoselect the best one based on the used drive detection,<br>- `default`, best suited for hard disk drives,<br>- `nvme`, best suited for NVMe SSD drives. | -| `JOB` | Environment | | A custom job to use: details below in the [Custom job](#custom-job) section.<br>This parameter overrides the `PROFILE` parameter. | -| `IO` | Environment | `direct` | The drive access mode:<br>- `direct` for synchronous I/O,<br>- `buffered` for asynchronous I/O. | -| `DATA` | Environment | `random` | The test data:<br>- `random` to use random data,<br>- `0x00` to fill with 0 (zero) values. | -| `SIZE` | Environment | `1G` | The size of the test file (e.g., `500M`, `1G`, `10G`). | -| `WARMUP` | Environment | `0` | When set to `1`, use a warmup phase, thus preparing the test file with `dd`, using either random data or zero values as set by `DATA`. | -| `WARMUP_SIZE` | Environment | | Warmup block size. Defaults depend on the profile:<br>- `8M` for the default profile<br>- `64M` for the NVMe profile. | -| `RUNTIME` | Environment | `5s` | The duration for each job (e.g., `1s`, `5s`, `2m`).<br>Used alone: time-based benchmark.<br>Used with `LOOPS`: caps each loop to this duration. | -| `LOOPS` | Environment | | The number of test loops to run.<br>Used alone: runs exactly N loops with no time limit.<br>Used with `RUNTIME`: runs N loops, each capped to `RUNTIME`. | -| `DRY_RUN` | Environment | `0` | When set to `1`, validates configuration without running the benchmark. | -| `UPDATE_CHECK` | Environment | `1` | When set to `0`, skips the update check at startup. | -| `FORMAT` | Environment | _(empty)_| Output format:<br>- Empty or unset for human-readable output,<br>- `json` for JSON format,<br>- `yaml` for YAML format,<br>- `xml` for XML format.
Machine-readable formats disable colors, emojis, and update check. | -| `/disk` | Volume | | The target path to benchmark. | - -By default, a 1 GB test file is used, with a 5 seconds duration for each test, reading and writing random bytes on the disk where Docker is installed. - -### With parameters - -For example, you can use a 4 GB file, looping each test twice, but after a warmup phase, and writting only zeros instead of random data. -You can achieve this using the following command: -``` -docker run -it --rm -e SIZE=4G -e WARMUP=1 -e LOOPS=2 -e DATA=0x00 e7db/diskmark -``` +## Examples -You can also combine `LOOPS` and `RUNTIME` for hybrid mode — run a fixed number of loops, but cap each loop's duration: -``` -docker run -it --rm -e SIZE=1G -e LOOPS=3 -e RUNTIME=10s e7db/diskmark -``` +### Basic parameters -Warmup block size is tunable with `WARMUP_SIZE` (e.g. `8M`, `64M`, `128M`). By default it adapts to the selected profile: `8M` for the default profile (HDD-friendly) and `64M` for the NVMe profile. You can override it explicitly if needed: -``` -docker run -it --rm -e WARMUP=1 -e WARMUP_SIZE=128M e7db/diskmark +```bash +# 4 GB file, 2 loops, warmup, zero data pattern +docker run -it --rm e7db/diskmark --size 4G --warmup --loops 2 --data zero +docker run -it --rm e7db/diskmark -s 4G -w -l 2 -d zero + +# Hybrid mode: 3 loops, each capped at 10 seconds +docker run -it --rm e7db/diskmark --loops 3 --runtime 10s + +# Custom warmup block size +docker run -it --rm e7db/diskmark --warmup --warmup-size 128M ``` ### Force profile -A detection of your disk is tried, so the benchmark uses the appropriate profile, `default` or `nvme`. -In the event that the detection fails, yielding "Unknown", or returns the wrong profile, you can force the use of either of the profiles: -``` -docker run -it --rm -e PROFILE=nvme e7db/diskmark +Drive detection selects the appropriate profile (`default` or `nvme`). Override if needed: +```bash +docker run -it --rm e7db/diskmark --profile nvme ``` ### Custom job -You can run a custom single job using the `JOB` parameter. -The job expression must follow a specific format, such as follows: `RND4KQ32T16`. -It is composed of 4 parts: -- `RND` or `SEQ`, for random or sequential access -- `xxK` or `xxM`, where `xx` is the block size, and `K` or `M` is the unit (Kilobytes or Megabytes) -- `Qyy`, where `yy` is the queue depth -- `Tzz`, where `zz` is the number of threads +Run a custom job using the format `[RND|SEQ][size][Q depth][T threads]`: +- `RND` or `SEQ` — random or sequential access +- `xxK` or `xxM` — block size (e.g., `4K`, `1M`) +- `Qyy` — queue depth +- `Tzz` — number of threads -In the previous example `RND4KQ32T16`, the job uses **random accesses**, with a **block size of 4K**, a **queue depth of 32**, and **16 threads**. - -Construct your custom chain, then run the benchmark using the following command: -``` -docker run -it --rm -e JOB=RND4KQ32T16 e7db/diskmark +Example: `RND4KQ32T16` = random 4K blocks, queue depth 32, 16 threads. +```bash +docker run -it --rm e7db/diskmark --job RND4KQ32T16 ``` ### Specific disk -By default, the benchmark runs on the disk where Docker is installed, using a [Docker volume](https://docs.docker.com/storage/volumes/) mounted on the `/disk` path inside the container. 
-To run the benchmark on a different disk, use a path belonging to that disk, and mount it as the `/disk` volume: -``` -docker run -it --rm -v /path/to/specific/disk:/disk e7db/diskmark +By default, the benchmark uses a [Docker volume](https://docs.docker.com/storage/volumes/) at `/disk`. Mount a different path to benchmark another disk: +```bash +docker run -it --rm -v /path/to/disk:/disk e7db/diskmark ``` ### Machine-readable output -For scripting and automation, you can output results in JSON, YAML, or XML format: -``` -docker run -it --rm -e FORMAT=json e7db/diskmark -docker run -it --rm -e FORMAT=yaml e7db/diskmark -docker run -it --rm -e FORMAT=xml e7db/diskmark +Output in JSON, YAML, or XML for scripting (automatically disables colors, emojis, and update check): +```bash +docker run -it --rm e7db/diskmark --format json +docker run -it --rm e7db/diskmark --format yaml +docker run -it --rm e7db/diskmark --format xml ``` -Machine-readable formats automatically disable colors, emojis, and the update check to produce clean output. - #### JSON output sample ```json diff --git a/diskmark.sh b/diskmark.sh old mode 100755 new mode 100644 index 52d3ec5..6b3889b --- a/diskmark.sh +++ b/diskmark.sh @@ -1,809 +1,72 @@ #!/bin/bash +# diskmark - A fio-based disk benchmark tool +# Main entry point - sources modular components set -e -is_semver() { - [[ "$1" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]] -} - -UPDATE_CHECK="${UPDATE_CHECK:-1}" -if [[ ! "$UPDATE_CHECK" =~ ^[01]$ ]]; then - echo "Error: UPDATE_CHECK must be either 0 or 1." >&2 - exit 1 -fi - -FORMAT="${FORMAT:-}" -if [[ -n "$FORMAT" && ! "$FORMAT" =~ ^(json|yaml|xml)$ ]]; then - echo "Error: FORMAT must be empty or one of: json, yaml, xml." >&2 - exit 1 -fi - -if [[ -n "$FORMAT" ]]; then - COLOR=0 - EMOJI=0 - UPDATE_CHECK=0 -fi - -VERSION_FILE="/etc/diskmark-version" -if [[ "$UPDATE_CHECK" -eq 1 ]] && [ -f "$VERSION_FILE" ]; then - CURRENT_VERSION=$(cat "$VERSION_FILE") - LATEST_VERSION=$(wget --no-check-certificate -qO- https://api.github.com/repos/e7db/docker-diskmark/releases/latest 2>/dev/null | grep '"tag_name"' | cut -d'"' -f4 || true) - if [[ "$CURRENT_VERSION" != "unknown" ]] && is_semver "$CURRENT_VERSION" && is_semver "$LATEST_VERSION" && [[ "$CURRENT_VERSION" != "$LATEST_VERSION" ]]; then - echo -e "Update available: \e[1;37m$CURRENT_VERSION\e[0m => \e[1;37m$LATEST_VERSION\e[0m (docker pull e7db/diskmark:latest)\n" - fi -fi - -detect_color_support() { - if [[ "$TERM" == "dumb" ]]; then - echo 0 - else - echo 1 - fi -} - -detect_emoji_support() { - if [[ "$TERM" == "dumb" ]]; then - echo 0 - else - echo 1 - fi -} - -if [[ -z "$COLOR" ]]; then - COLOR=$(detect_color_support) -elif [[ ! "$COLOR" =~ ^[01]$ ]]; then - echo "Error: COLOR must be either 0 or 1." >&2 - exit 1 -fi -if [[ -z "$EMOJI" ]]; then - EMOJI=$(detect_emoji_support) -elif [[ ! "$EMOJI" =~ ^[01]$ ]]; then - echo "Error: EMOJI must be either 0 or 1." 
>&2 - exit 1 -fi - -RESET="0m" -NORMAL="0" -BOLD="1" -BLACK=";30m" -RED=";31m" -GREEN=";32m" -YELLOW=";33m" -BLUE=";34m" -MAGENTA=";35m" -CYAN=";36m" -WHITE=";37m" - -function color() { - if [[ "$COLOR" -eq 1 ]]; then - echo "\e[$1$2" - else - echo "" - fi -} - -if [[ "$EMOJI" -eq 1 ]]; then - SYM_SUCCESS="✅" - SYM_FAILURE="❌" - SYM_STOP="🛑" +if [ -d "/usr/lib/diskmark" ]; then + LIB_DIR="/usr/lib/diskmark" +elif [ -d "$(dirname "$0")/../lib" ]; then + LIB_DIR="$(dirname "$0")/../lib" else - SYM_SUCCESS="[OK]" - SYM_FAILURE="[FAIL]" - SYM_STOP="[STOP]" + LIB_DIR="$(dirname "$0")/lib" fi -function clean() { - [[ -z $TARGET ]] && return - if [[ -n $ISNEWDIR ]]; then - rm -rf "$TARGET" - else - rm -f "$TARGET"/.diskmark.{json,tmp} - fi -} +source "$LIB_DIR/utils.sh" +source "$LIB_DIR/args.sh" +source "$LIB_DIR/validate.sh" +source "$LIB_DIR/detect.sh" +source "$LIB_DIR/profiles.sh" +source "$LIB_DIR/benchmark.sh" +source "$LIB_DIR/output.sh" +source "$LIB_DIR/update.sh" -function interrupt() { - local EXIT_CODE="${1:-0}" - echo -e "\r\n\n$SYM_STOP The benchmark was $(color $BOLD $RED)interrupted$(color $RESET)." - if [ ! -z "$2" ]; then - echo -e "$2" - fi - clean - exit "${EXIT_CODE}" -} -trap 'interrupt $? "The benchmark was aborted before its completion."' HUP INT QUIT KILL TERM +main() { + parse_args "$@" -function fail() { - local EXIT_CODE="${1:-1}" - echo -e "\r\n\n$SYM_FAILURE The benchmark had $(color $BOLD $RED)failed$(color $RESET)." - if [ ! -z "$2" ]; then - echo -e "$2" - fi - clean - exit "${EXIT_CODE}" -} -trap 'fail $? "The benchmark failed before its completion."' ERR + validate_update_check + validate_format + check_for_updates + init_display_settings + setup_traps -function error() { - local EXIT_CODE="${1:-1}" - echo -e "\r\n$SYM_FAILURE The benchmark encountered an $(color $BOLD $RED)error$(color $RESET)." - if [ ! -z "$2" ]; then - echo -e "$2" + require_command fio + require_command dd + require_command awk + require_command df + if [[ -n "$JOB" ]]; then + require_command perl fi - clean - exit "${EXIT_CODE}" -} -function requireCommand() { - command -v "$1" >/dev/null 2>&1 || fail 1 "Missing required dependency: $(color $BOLD $WHITE)$1$(color $RESET). Please install it and try again." -} - -function validateSizeString() { - local VALUE="$1" - local LABEL="$2" - if [[ -z "$VALUE" ]]; then - error 1 "$LABEL must be provided." - fi - if [[ ! "$VALUE" =~ ^[0-9]+([KkMmGgTtPp])?$ ]]; then - error 1 "$LABEL must be a positive integer optionally followed by K, M, G, T, or P (example: 1G)." + TARGET="${TARGET:-$(pwd)}" + ensure_writable_target "$TARGET" + if [ ! -d "$TARGET" ]; then + ISNEWDIR=1 + mkdir -p "$TARGET" fi - local BYTES=$(toBytes "$VALUE") - if [[ -z "$BYTES" || "$BYTES" -le 0 ]]; then - error 1 "$LABEL must be greater than zero." - fi -} -function validateBinaryFlag() { - local VALUE="$1" - local LABEL="$2" - if [[ ! "$VALUE" =~ ^[01]$ ]]; then - error 1 "$LABEL must be either 0 or 1." - fi -} - -function validateRuntime() { - local VALUE="$1" - if [[ -z "$VALUE" ]]; then - return 0 - fi - if [[ ! "$VALUE" =~ ^[0-9]+(ms|s|m|h)$ ]]; then - error 1 "RUNTIME must match the fio time format (e.g., 500ms, 5s, 2m, 1h)." - fi -} - -function validateInteger() { - local VALUE="$1" - local LABEL="$2" - local ALLOW_ZERO="${3:-0}" - local REGEX='^[1-9][0-9]*$' - local ERROR_MSG="$LABEL must be a positive integer." - - if [[ "$ALLOW_ZERO" -eq 1 ]]; then - REGEX='^[0-9]+$' - ERROR_MSG="$LABEL must be a non-negative integer." 
- fi - - if [[ -z "$VALUE" ]]; then - error 1 "$LABEL must be provided." - fi - if [[ ! "$VALUE" =~ $REGEX ]]; then - error 1 "$ERROR_MSG" - fi -} - -function ensureWritableTarget() { - local PATH_TO_CHECK="$1" - if [[ "$PATH_TO_CHECK" == "/" ]]; then - error 1 "Refusing to run against the filesystem root. Please set TARGET to a dedicated directory." - fi - if [[ -d "$PATH_TO_CHECK" && ! -w "$PATH_TO_CHECK" ]]; then - error 1 "TARGET directory is not writable: $PATH_TO_CHECK" - fi -} - -function toBytes() { - local SIZE=$1 - local UNIT=${SIZE//[0-9]/} - local NUMBER=${SIZE//[a-zA-Z]/} - case $UNIT in - P|p) echo $((NUMBER * 1024 * 1024 * 1024 * 1024 * 1024));; - T|t) echo $((NUMBER * 1024 * 1024 * 1024 * 1024));; - G|g) echo $((NUMBER * 1024 * 1024 * 1024));; - M|m) echo $((NUMBER * 1024 * 1024));; - K|k) echo $((NUMBER * 1024));; - *) echo $NUMBER;; - esac -} + validate_all_inputs + detect_all + select_profile + validate_io_mode + validate_data_pattern + prepare_benchmark_params -function fromBytes() { - local SIZE=$1 - local UNIT="" - if (( SIZE > 1024 )); then - SIZE=$((SIZE / 1024)) - UNIT="K" - fi - if (( SIZE > 1024 )); then - SIZE=$((SIZE / 1024)) - UNIT="M" - fi - if (( SIZE > 1024 )); then - SIZE=$((SIZE / 1024)) - UNIT="G" - fi - if (( SIZE > 1024 )); then - SIZE=$((SIZE / 1024)) - UNIT="T" - fi - if (( SIZE > 1024 )); then - SIZE=$((SIZE / 1024)) - UNIT="P" - fi - echo "${SIZE}${UNIT}" -} - -function parseResult() { - local bandwidth=$(cat "$TARGET/.diskmark.json" | grep -A"$2" '"name" : "'"$1"'"' | grep "$3" | sed 's/ "'"$3"'" : //g' | sed 's:,::g' | awk '{ SUM += $1} END { printf "%.0f", SUM }') - local throughput=$(cat "$TARGET/.diskmark.json" | grep -A"$2" '"name" : "'"$1"'"' | grep "$4" | sed 's/ "'"$4"'" : //g' | sed 's:,::g' | awk '{ SUM += $1} END { printf "%.0f", SUM }' | cut -d. 
-f1) - echo "$(($bandwidth / 1024 / 1024)) MB/s, $throughput IO/s" -} - -function parseReadResult() { - parseResult "$1" 15 bw_bytes iops -} - -function parseWriteResult() { - parseResult "$1" 80 bw_bytes iops -} - - -function parseRandomReadResult() { - parseResult "$1" 15 bw_bytes iops -} - -function parseRandomWriteResult() { - parseResult "$1" 80 bw_bytes iops -} - -function loadDefaultProfile() { - NAME=("SEQ1MQ8T1" "SEQ1MQ1T1" "RND4KQ32T1" "RND4KQ1T1") - LABEL=("Sequential 1M Q8T1" "Sequential 1M Q1T1" "Random 4K Q32T1" "Random 4K Q1T1") - JOBCOLOR=($(color $NORMAL $YELLOW) $(color $NORMAL $YELLOW) $(color $NORMAL $CYAN) $(color $NORMAL $CYAN)) - BLOCKSIZE=("1M" "1M" "4K" "4K") - IODEPTH=(8 1 32 1) - NUMJOBS=(1 1 1 1) - READWRITE=("" "" "rand" "rand") - SIZEDIVIDER=(-1 -1 16 32) -} - -function loadNVMeProfile() { - NAME=("SEQ1MQ8T1" "SEQ128KQ32T1" "RND4KQ32T16" "RND4KQ1T1") - LABEL=("Sequential 1M Q8T1" "Sequential 128K Q32T1" "Random 4K Q32T16" "Random 4K Q1T1") - JOBCOLOR=($(color $NORMAL $YELLOW) $(color $NORMAL $GREEN) $(color $NORMAL $CYAN) $(color $NORMAL $CYAN)) - BLOCKSIZE=("1M" "128K" "4K" "4K") - IODEPTH=(8 32 32 1) - NUMJOBS=(1 1 16 1) - READWRITE=("" "" "rand" "rand") - SIZEDIVIDER=(-1 -1 16 32) -} - -function loadJob() { - PARAMS=($(echo "$JOB" | perl -nle '/^(RND|SEQ)([0-9]+[KM])Q([0-9]+)T([0-9]+)$/; print "$1 $2 $3 $4"')) - if [ -z ${PARAMS[0]} ]; then - error 1 "Invalid job name: $(color $BOLD $WHITE)$JOB$(color $RESET)" - fi - - case "${PARAMS[0]}" in - RND) - READWRITE=("rand") - READWRITELABEL="Random" - ;; - SEQ) - READWRITE=("") - READWRITELABEL="Sequential" - ;; - esac - BLOCKSIZE=(${PARAMS[1]}) - IODEPTH=(${PARAMS[2]}) - NUMJOBS=(${PARAMS[3]}) - - NAME=($JOB) - LABEL="$READWRITELABEL $BLOCKSIZE Q${IODEPTH}T${NUMJOBS}" - JOBCOLOR=($(color $NORMAL $MAGENTA)) -} - -requireCommand fio -requireCommand dd -requireCommand awk -requireCommand df -if [[ -n "$JOB" ]]; then - requireCommand perl -fi - -TARGET="${TARGET:-$(pwd)}" -ensureWritableTarget "$TARGET" -if [ ! 
-d "$TARGET" ]; then - ISNEWDIR=1 - mkdir -p "$TARGET" -fi - -validateSizeString "${SIZE:-1G}" "SIZE" -validateBinaryFlag "${WARMUP:-0}" "WARMUP" -validateBinaryFlag "${DRY_RUN:-0}" "DRY_RUN" -if [[ -n "$WARMUP_SIZE" ]]; then - validateSizeString "$WARMUP_SIZE" "WARMUP_SIZE" -fi -if [[ -n "$LOOPS" ]]; then - validateInteger "$LOOPS" "LOOPS" -fi -if [[ -n "$RUNTIME" ]]; then - validateRuntime "$RUNTIME" -fi -DRIVELABEL="Drive" - -FILESYSTEMPARTITION="" -if command -v lsblk &> /dev/null; then - FILESYSTEMPARTITION=$(lsblk -P 2>/dev/null | grep "$TARGET" | head -n 1 | awk '{print $1}' | cut -d"=" -f2 | cut -d"\"" -f2) -fi -if [ -z "$FILESYSTEMPARTITION" ] && command -v findmnt &> /dev/null; then - FILESYSTEMPARTITION=$(findmnt -n -o SOURCE "$TARGET" 2>/dev/null | sed 's|/dev/||') -fi -if [ -z "$FILESYSTEMPARTITION" ]; then - FILESYSTEMPARTITION=$(df "$TARGET" 2>/dev/null | tail +2 | awk '{print $1}' | sed 's|/dev/||') -fi - -FILESYSTEMTYPE=$(df -T "$TARGET" | tail +2 | awk '{print $2}') -FILESYSTEMSIZE=$(df -Th "$TARGET" | tail +2 | awk '{print $3}') -ISOVERLAY=0 -ISTMPFS=0 -ISNVME=0 -ISEMMC=0 -ISMDADM=0 -if [[ "$FILESYSTEMTYPE" == overlay ]]; then - ISOVERLAY=1 -elif [[ "$FILESYSTEMTYPE" == tmpfs ]]; then - ISTMPFS=1 -elif [[ "$FILESYSTEMPARTITION" == mmcblk* ]]; then - DRIVE=$(echo $FILESYSTEMPARTITION | rev | cut -c 3- | rev) - ISEMMC=1 -elif [[ "$FILESYSTEMPARTITION" == nvme* ]]; then - DRIVE=$(echo $FILESYSTEMPARTITION | rev | cut -c 3- | rev) - ISNVME=1 -elif [[ "$FILESYSTEMPARTITION" == hd* ]] || [[ "$FILESYSTEMPARTITION" == sd* ]] || [[ "$FILESYSTEMPARTITION" == vd* ]]; then - DRIVE=$(echo $FILESYSTEMPARTITION | sed 's/[0-9]*$//') -elif [[ "$FILESYSTEMPARTITION" == md* ]]; then - DRIVE=$FILESYSTEMPARTITION - ISMDADM=1 -else - DRIVE="" -fi -if [ $ISOVERLAY -eq 1 ]; then - DRIVENAME="Overlay" - DRIVE="overlay" - DRIVESIZE=$FILESYSTEMSIZE -elif [ $ISTMPFS -eq 1 ]; then - DRIVENAME="RAM" - DRIVE="tmpfs" - DRIVESIZE=$(free -h --si | grep Mem: | awk '{print $2}') -elif [ $ISEMMC -eq 1 ]; then - DEVICE=() - if [ -f /sys/block/$DRIVE/device/type ]; then - case "$(cat /sys/block/$DRIVE/device/type)" in - SD) DEVICE+=("SD Card");; - *) DEVICE+=();; - esac - fi - [ -f /sys/block/$DRIVE/device/name ] && DEVICE+=($(cat /sys/block/$DRIVE/device/name | sed 's/ *$//g')) - DRIVENAME=${DEVICE[@]:-"eMMC flash storage"} - DRIVESIZE=$(fromBytes $(($(cat /sys/block/$DRIVE/size) * 512))) -elif [ $ISMDADM -eq 1 ]; then - DRIVELABEL="Drives" - DRIVENAME="mdadm $(cat /sys/block/$DRIVE/md/level)" - DRIVESIZE=$(fromBytes $(($(cat /sys/block/$DRIVE/size) * 512))) - DISKS=$(ls /sys/block/$DRIVE/slaves/) - DRIVEDETAILS="using $(echo $DISKS | wc -w) disks ($(echo $DISKS | sed 's/ /, /g'))" -elif [ -d /sys/block/$DRIVE/device ]; then - DEVICE=() - [ -f /sys/block/$DRIVE/device/vendor ] && DEVICE+=($(cat /sys/block/$DRIVE/device/vendor | sed 's/ *$//g')) - [ -f /sys/block/$DRIVE/device/model ] && DEVICE+=($(cat /sys/block/$DRIVE/device/model | sed 's/ *$//g')) - DRIVENAME=${DEVICE[@]:-"Unknown drive"} - DRIVESIZE=$(fromBytes $(($(cat /sys/block/$DRIVE/size) * 512))) -else - DRIVE="Unknown" - DRIVENAME="Unknown" - DRIVESIZE="Unknown" -fi -if [ "$DRIVE" = "Unknown" ]; then - DRIVEINFO="Unknown" -else - DRIVEINFO="$DRIVENAME ($DRIVE, $DRIVESIZE) $DRIVEDETAILS" -fi -if [ ! 
-z $JOB ]; then - PROFILE="Job \"$JOB\"" - loadJob -else - case "$PROFILE" in - ""|auto) - if [ $ISNVME -eq 1 ]; then - PROFILE="auto (nvme)" - loadNVMeProfile - else - PROFILE="auto (default)" - loadDefaultProfile - fi - ;; - default) - loadDefaultProfile - ;; - nvme) - loadNVMeProfile - ;; - *) - error 1 "Invalid PROFILE: $(color $BOLD $WHITE)$PROFILE$(color $RESET). Allowed values are 'auto', 'default', or 'nvme'." - ;; - esac -fi -case "$IO" in - ""|direct) - IO="direct (synchronous)" - DIRECT=1 - ;; - buffered) - IO="buffered (asynchronous)" - DIRECT=0 - ;; - *) - error 1 "Invalid IO mode: $(color $BOLD $WHITE)$IO$(color $RESET). Allowed values are 'direct' or 'buffered'." - ;; -esac -case "$DATA" in - ""|random|rand) - DATA="random" - WRITEZERO=0 - ;; - zero | 0 | 0x00) - DATA="zero (0x00)" - WRITEZERO=1 - ;; - *) - error 1 "Invalid DATA pattern: $(color $BOLD $WHITE)$DATA$(color $RESET). Allowed values are 'random' or 'zero'." - ;; -esac -SIZE="${SIZE:-1G}" -BYTESIZE=$(toBytes $SIZE) -WARMUP="${WARMUP:-0}" -if [ -z "$WARMUP_SIZE" ]; then - case "$PROFILE" in - *nvme*) WARMUP_SIZE="64M" ;; - *) WARMUP_SIZE="8M" ;; - esac -fi -validateSizeString "$WARMUP_SIZE" "WARMUP_SIZE" -WARMUP_BLOCK_BYTES=$(toBytes $WARMUP_SIZE) -if [ -z "$WARMUP_BLOCK_BYTES" ] || [ "$WARMUP_BLOCK_BYTES" -le 0 ]; then - WARMUP_BLOCK_BYTES=$(toBytes 8M) - WARMUP_SIZE="8M" -fi -BLOCK_MB=$((WARMUP_BLOCK_BYTES / 1024 / 1024)) -[ "$BLOCK_MB" -lt 1 ] && BLOCK_MB=1 -[ "$BLOCK_MB" -gt 1024 ] && BLOCK_MB=1024 - -if [[ -n "$LOOPS" ]] && [[ -n "$RUNTIME" ]]; then - LIMIT="Loops: $LOOPS (max $RUNTIME each)" - LIMIT_OPTION="--loops=$LOOPS --runtime=$RUNTIME" -elif [[ -n "$LOOPS" ]]; then - LIMIT="Loops: $LOOPS" - LIMIT_OPTION="--loops=$LOOPS" -else - RUNTIME="${RUNTIME:-5s}" - LIMIT="Runtime: $RUNTIME" - LIMIT_OPTION="--time_based --runtime=$RUNTIME" -fi - -if [[ -z "$FORMAT" ]]; then - echo -e "$(color $BOLD $WHITE)Configuration:$(color $RESET) -- Target: $TARGET - - $DRIVELABEL: $DRIVEINFO - - Filesystem: $FILESYSTEMTYPE ($FILESYSTEMPARTITION, $FILESYSTEMSIZE) -- Profile: $PROFILE - - I/O: $IO - - Data: $DATA - - Size: $SIZE - - Warmup: $WARMUP$([ "$WARMUP" -eq 1 ] && echo " (block: ${BLOCK_MB}M)") - - $LIMIT -" -fi - -DRY_RUN="${DRY_RUN:-0}" -if [ "$DRY_RUN" -eq 1 ]; then - echo -e "$SYM_SUCCESS Dry run $(color $BOLD $GREEN)completed$(color $RESET). Configuration is valid." - exit 0 -fi - -if [[ -z "$FORMAT" ]]; then - echo -e "The benchmark is $(color $BOLD $WHITE)running$(color $RESET), please wait..." -fi - -TOTAL_JOBS=${#NAME[@]} - -clear_progress() { - if [[ -z "$FORMAT" ]]; then - printf "\r\033[K" - fi -} - -show_progress() { if [[ -z "$FORMAT" ]]; then - if [[ "$2" == *"read"* ]]; then - printf "\n[%d/%d] %s..." "$1" "$TOTAL_JOBS" "$2" - else - printf "\r[%d/%d] %s..." 
"$1" "$TOTAL_JOBS" "$2" - fi - fi -} - -fio_benchmark() { - fio --filename="$TARGET/.diskmark.tmp" \ - --stonewall --ioengine=libaio --direct=$DIRECT --zero_buffers=$WRITEZERO \ - $LIMIT_OPTION --size="$1" \ - --name="$2" --blocksize="$3" --iodepth="$4" --numjobs="$5" --readwrite="$6" \ - --output-format=json >"$TARGET/.diskmark.json" -} - -if [ $WARMUP -eq 1 ]; then - if [ $WRITEZERO -eq 1 ]; then - FILESOURCE=/dev/zero - else - FILESOURCE=/dev/urandom - fi - TOTAL_MB=$((BYTESIZE / 1024 / 1024)) - if [ "$TOTAL_MB" -eq 0 ]; then - dd if="$FILESOURCE" of="$TARGET/.diskmark.tmp" bs="$BYTESIZE" count=1 oflag=direct status=none - else - CHUNKS=$((TOTAL_MB / BLOCK_MB)) - REMAINDER_MB=$((TOTAL_MB % BLOCK_MB)) - if [ $CHUNKS -gt 0 ]; then - dd if="$FILESOURCE" of="$TARGET/.diskmark.tmp" bs=${BLOCK_MB}M count=$CHUNKS oflag=direct status=none - fi - if [ $REMAINDER_MB -gt 0 ]; then - dd if="$FILESOURCE" of="$TARGET/.diskmark.tmp" bs=1M count=$REMAINDER_MB oflag=direct conv=notrunc seek=$((CHUNKS * BLOCK_MB)) status=none - fi - fi -fi - -RESULTS_NAME=() -RESULTS_STATUS=() -RESULTS_READ_BW=() -RESULTS_READ_IOPS=() -RESULTS_READ_LAT=() -RESULTS_WRITE_BW=() -RESULTS_WRITE_IOPS=() -RESULTS_WRITE_LAT=() -SKIPPED_JOBS=() - -function parseResultRaw() { - local bandwidth=$(cat "$TARGET/.diskmark.json" | grep -A"$2" '"name" : "'"$1"'"' | grep "$3" | sed 's/ "'"$3"'" : //g' | sed 's:,::g' | awk '{ SUM += $1} END { printf "%.6f", SUM / 1024 / 1024 }') - local throughput=$(cat "$TARGET/.diskmark.json" | grep -A"$2" '"name" : "'"$1"'"' | grep "$4" | sed 's/ "'"$4"'" : //g' | sed 's:,::g' | awk '{ SUM += $1} END { printf "%.6f", SUM }') - echo "$bandwidth $throughput" -} - -function parseLatency() { - local job_name="$1" - local operation="$2" - local lat_ns=$(cat "$TARGET/.diskmark.json" | \ - grep -A200 '"name" : "'"$job_name"'"' | \ - grep -A50 "\"$operation\" :" | \ - grep -A5 '"clat_ns"' | \ - grep '"mean"' | head -1 | sed 's/.*: //g' | sed 's:,::g') - if [[ -n "$lat_ns" ]]; then - echo "$lat_ns" | awk '{printf "%.6f", $1 / 1000000}' - else - echo "0" + output_config_human fi -} - -function formatLatency() { - local lat_ms="$1" - if [[ -z "$lat_ms" ]] || [[ "$lat_ms" == "0" ]]; then - echo "0.00ms" - elif awk "BEGIN {exit !($lat_ms >= 0.01)}"; then - - printf "%.2fms" "$lat_ms" - elif awk "BEGIN {exit !($lat_ms >= 0.001)}"; then - - printf "%.3fms" "$lat_ms" - else - local lat_ns=$(awk "BEGIN {printf \"%.0f\", $lat_ms * 1000000}") - echo "${lat_ns}ns" + DRY_RUN="${DRY_RUN:-0}" + if [ "$DRY_RUN" -eq 1 ]; then + output_dry_run_success + exit 0 fi -} - -function escapeJson() { - echo "$1" | sed 's/\\/\\\\/g' | sed 's/"/\\"/g' -} -function outputResults() { - local total=${#RESULTS_NAME[@]} - case "$FORMAT" in - "") - local has_skipped=0 - for ((j = 0; j < total; j++)); do - [[ "${RESULTS_STATUS[$j]}" == "skipped" ]] && has_skipped=1 && break - done - if [ $has_skipped -eq 1 ]; then - echo -e "\n$SYM_SUCCESS The benchmark is $(color $BOLD $GREEN)finished$(color $RESET) with $(color $BOLD $YELLOW)warnings$(color $RESET):" - for job in "${SKIPPED_JOBS[@]}"; do - echo -e " - $job" - done - else - echo -e "\n$SYM_SUCCESS The benchmark is $(color $BOLD $GREEN)finished$(color $RESET)." 
- fi - ;; - json) - echo "{" - echo " \"configuration\": {" - echo " \"target\": \"$(escapeJson "$TARGET")\"," - echo " \"drive\": {" - echo " \"label\": \"$(escapeJson "$DRIVELABEL")\"," - echo " \"info\": \"$(escapeJson "$DRIVEINFO")\"" - echo " }," - echo " \"filesystem\": {" - echo " \"type\": \"$(escapeJson "$FILESYSTEMTYPE")\"," - echo " \"partition\": \"$(escapeJson "$FILESYSTEMPARTITION")\"," - echo " \"size\": \"$(escapeJson "$FILESYSTEMSIZE")\"" - echo " }," - echo " \"profile\": \"$(escapeJson "$PROFILE")\"," - echo " \"io\": \"$(escapeJson "$IO")\"," - echo " \"data\": \"$(escapeJson "$DATA")\"," - echo " \"size\": \"$SIZE\"," - echo " \"warmup\": $WARMUP," - if [[ -n "$LOOPS" ]]; then - echo " \"loops\": $LOOPS" - else - echo " \"runtime\": \"$RUNTIME\"" - fi - echo " }," - echo " \"results\": [" - for ((j = 0; j < total; j++)); do - echo -n " {\"name\": \"$(escapeJson "${RESULTS_NAME[$j]}")\", \"status\": \"${RESULTS_STATUS[$j]}\"" - if [[ "${RESULTS_STATUS[$j]}" != "skipped" ]]; then - echo -n ", \"read\": {\"bandwidth_mb\": ${RESULTS_READ_BW[$j]}, \"iops\": ${RESULTS_READ_IOPS[$j]}, \"latency_ms\": ${RESULTS_READ_LAT[$j]}}, \"write\": {\"bandwidth_mb\": ${RESULTS_WRITE_BW[$j]}, \"iops\": ${RESULTS_WRITE_IOPS[$j]}, \"latency_ms\": ${RESULTS_WRITE_LAT[$j]}}" - fi - echo -n "}" - [[ $j -lt $((total - 1)) ]] && echo "," || echo - done - echo " ]" - echo "}" - ;; - yaml) - echo "configuration:" - echo " target: \"$(escapeJson "$TARGET")\"" - echo " drive:" - echo " label: \"$(escapeJson "$DRIVELABEL")\"" - echo " info: \"$(escapeJson "$DRIVEINFO")\"" - echo " filesystem:" - echo " type: \"$(escapeJson "$FILESYSTEMTYPE")\"" - echo " partition: \"$(escapeJson "$FILESYSTEMPARTITION")\"" - echo " size: \"$(escapeJson "$FILESYSTEMSIZE")\"" - echo " profile: \"$(escapeJson "$PROFILE")\"" - echo " io: \"$(escapeJson "$IO")\"" - echo " data: \"$(escapeJson "$DATA")\"" - echo " size: \"$SIZE\"" - echo " warmup: $WARMUP" - if [[ -n "$LOOPS" ]]; then - echo " loops: $LOOPS" - else - echo " runtime: \"$RUNTIME\"" - fi - echo "results:" - for ((j = 0; j < total; j++)); do - echo " - name: \"$(escapeJson "${RESULTS_NAME[$j]}")\"" - echo " status: \"${RESULTS_STATUS[$j]}\"" - if [[ "${RESULTS_STATUS[$j]}" != "skipped" ]]; then - echo " read:" - echo " bandwidth_mb: ${RESULTS_READ_BW[$j]}" - echo " iops: ${RESULTS_READ_IOPS[$j]}" - echo " latency_ms: ${RESULTS_READ_LAT[$j]}" - echo " write:" - echo " bandwidth_mb: ${RESULTS_WRITE_BW[$j]}" - echo " iops: ${RESULTS_WRITE_IOPS[$j]}" - echo " latency_ms: ${RESULTS_WRITE_LAT[$j]}" - fi - done - ;; - xml) - echo '' - echo "" - echo " " - echo " $TARGET" - echo " $DRIVEINFO" - echo " " - echo " $PROFILE" - echo " $IO" - echo " $DATA" - echo " $SIZE" - echo " $WARMUP" - if [[ -n "$LOOPS" ]]; then - echo " $LOOPS" - else - echo " $RUNTIME" - fi - echo " " - echo " " - for ((j = 0; j < total; j++)); do - echo " " - if [[ "${RESULTS_STATUS[$j]}" != "skipped" ]]; then - echo " " - echo " " - fi - echo " " - done - echo " " - echo "" - ;; - esac + output_running_message + run_warmup + run_all_benchmarks + output_results + clean } -for ((i = 0; i < ${#NAME[@]}; i++)); do - JOB_NUM=$((i + 1)) - DIVIDER=${SIZEDIVIDER[$i]:-1} - if [ "$DIVIDER" -le 0 ]; then - TESTSIZE=$BYTESIZE - else - TESTSIZE=$((BYTESIZE / DIVIDER)) - fi - BLOCKSIZE_BYTES=$(toBytes "${BLOCKSIZE[$i]}") - - if [ "$TESTSIZE" -lt "$BLOCKSIZE_BYTES" ]; then - SKIPPED_JOBS+=("${NAME[$i]} (size $(fromBytes $TESTSIZE) < block size ${BLOCKSIZE[$i]})") - RESULTS_NAME+=("${NAME[$i]}") - 
RESULTS_STATUS+=("skipped") - RESULTS_READ_BW+=(0) - RESULTS_READ_IOPS+=(0) - RESULTS_READ_LAT+=(0) - RESULTS_WRITE_BW+=(0) - RESULTS_WRITE_IOPS+=(0) - RESULTS_WRITE_LAT+=(0) - if [[ -z "$FORMAT" ]]; then - echo - echo -e "${JOBCOLOR[$i]}${LABEL[$i]}:$(color $RESET) Skipped" - fi - continue - fi - - case "${READWRITE[$i]}" in - rand) PARSE="parseRandom" ;; - *) PARSE="parse" ;; - esac - - show_progress "$JOB_NUM" "${NAME[$i]} read" - fio_benchmark "$TESTSIZE" "${NAME[$i]}Read" "${BLOCKSIZE[$i]}" "${IODEPTH[$i]}" "${NUMJOBS[$i]}" "${READWRITE[$i]}read" - READ_RAW=$(parseResultRaw "${NAME[$i]}Read" 15 bw_bytes iops) - READ_BW=$(echo "$READ_RAW" | awk '{print $1}') - READ_IOPS=$(echo "$READ_RAW" | awk '{print $2}') - READ_LAT=$(parseLatency "${NAME[$i]}Read" "read") - show_progress "$JOB_NUM" "${NAME[$i]} write" - fio_benchmark "$TESTSIZE" "${NAME[$i]}Write" "${BLOCKSIZE[$i]}" "${IODEPTH[$i]}" "${NUMJOBS[$i]}" "${READWRITE[$i]}write" - WRITE_RAW=$(parseResultRaw "${NAME[$i]}Write" 80 bw_bytes iops) - WRITE_BW=$(echo "$WRITE_RAW" | awk '{print $1}') - WRITE_IOPS=$(echo "$WRITE_RAW" | awk '{print $2}') - WRITE_LAT=$(parseLatency "${NAME[$i]}Write" "write") - if [[ -z "$FORMAT" ]]; then - clear_progress - echo -e "${JOBCOLOR[$i]}${LABEL[$i]}:$(color $RESET)" - printf "<= Read: %.0f MB/s, %.0f IO/s, %s\n" "$READ_BW" "$READ_IOPS" "$(formatLatency $READ_LAT)" - printf "=> Write: %.0f MB/s, %.0f IO/s, %s\n" "$WRITE_BW" "$WRITE_IOPS" "$(formatLatency $WRITE_LAT)" - fi - - RESULTS_NAME+=("${NAME[$i]}") - RESULTS_STATUS+=("success") - RESULTS_READ_BW+=($READ_BW) - RESULTS_READ_IOPS+=($READ_IOPS) - RESULTS_READ_LAT+=($READ_LAT) - RESULTS_WRITE_BW+=($WRITE_BW) - RESULTS_WRITE_IOPS+=($WRITE_IOPS) - RESULTS_WRITE_LAT+=($WRITE_LAT) -done - -outputResults - -clean +main "$@" diff --git a/lib/args.sh b/lib/args.sh new file mode 100644 index 0000000..12d8c2e --- /dev/null +++ b/lib/args.sh @@ -0,0 +1,214 @@ +#!/bin/bash +# args.sh - CLI argument parsing and help display +# Provides: show_help, show_version, parse_args + +VERSION_FILE="/etc/diskmark-version" +if [ -f "$VERSION_FILE" ]; then + SCRIPT_VERSION=$(cat "$VERSION_FILE") +elif command -v git &>/dev/null && git rev-parse --short HEAD &>/dev/null; then + GIT_DESC=$(git describe --tags --always 2>/dev/null) + if [[ "$GIT_DESC" =~ ^v?([0-9]+\.[0-9]+\.[0-9]+)$ ]]; then + SCRIPT_VERSION="${BASH_REMATCH[1]}" + elif [[ "$GIT_DESC" =~ ^v?([0-9]+\.[0-9]+\.[0-9]+)-([0-9]+)-g([a-f0-9]+)$ ]]; then + SCRIPT_VERSION="${BASH_REMATCH[1]}-dev.${BASH_REMATCH[2]}+${BASH_REMATCH[3]}" + else + SCRIPT_VERSION="0.0.0-dev+$(git rev-parse --short HEAD)" + fi +else + SCRIPT_VERSION="unknown" +fi + +show_help() { + cat << EOF +Docker DiskMark - A fio-based disk benchmark tool +Version: $SCRIPT_VERSION + +Usage: diskmark [OPTIONS] + +Options: + -h, --help Show this help message and exit + -v, --version Show version information and exit + + -t, --target PATH Target directory for benchmark (default: /disk or \$PWD) + -p, --profile PROFILE Benchmark profile: auto, default, nvme (default: auto) + -j, --job JOB Custom job definition (e.g., RND4KQ32T16) + Overrides --profile when specified + + -i, --io MODE I/O mode: direct, buffered (default: direct) + -d, --data PATTERN Data pattern: random, zero (default: random) + -s, --size SIZE Test file size (e.g., 500M, 1G, 10G) (default: 1G) + + -w, --warmup Enable warmup phase (default: enabled) + --no-warmup Disable warmup phase + --warmup-size SIZE Warmup block size (default: 8M for default, 64M for nvme) + + -r, --runtime DURATION Runtime 
per job (e.g., 500ms, 5s, 2m) (default: 5s) + -l, --loops COUNT Number of test loops to run + Can be combined with --runtime to cap each loop + + -n, --dry-run Validate configuration without running benchmark + -f, --format FORMAT Output format: json, yaml, xml (default: human-readable) + -u, --no-update-check Disable update check at startup + + --color Force colored output + --no-color Disable colored output + --emoji Force emoji output + --no-emoji Disable emoji output + +Environment Variables: + All options can also be set via environment variables: + TARGET, PROFILE, JOB, IO, DATA, SIZE, WARMUP, WARMUP_SIZE, + RUNTIME, LOOPS, DRY_RUN, FORMAT, UPDATE_CHECK, COLOR, EMOJI + + CLI arguments take precedence over environment variables. + +Examples: + diskmark --size 4G --warmup --loops 2 + diskmark -s 1G -r 10s -p nvme + diskmark --job RND4KQ32T16 --format json + diskmark -t /mnt/data -w -d zero + +For more information, visit: https://github.com/e7db/docker-diskmark +EOF + exit 0 +} + +show_version() { + echo "Docker DiskMark version $SCRIPT_VERSION" + exit 0 +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case "$1" in + -h|--help) + show_help + ;; + -v|--version) + show_version + ;; + -t|--target) + TARGET="$2" + shift 2 + ;; + --target=*) + TARGET="${1#*=}" + shift + ;; + -p|--profile) + PROFILE="$2" + shift 2 + ;; + --profile=*) + PROFILE="${1#*=}" + shift + ;; + -j|--job) + JOB="$2" + shift 2 + ;; + --job=*) + JOB="${1#*=}" + shift + ;; + -i|--io) + IO="$2" + shift 2 + ;; + --io=*) + IO="${1#*=}" + shift + ;; + -d|--data) + DATA="$2" + shift 2 + ;; + --data=*) + DATA="${1#*=}" + shift + ;; + -s|--size) + SIZE="$2" + shift 2 + ;; + --size=*) + SIZE="${1#*=}" + shift + ;; + -w|--warmup) + WARMUP=1 + shift + ;; + --no-warmup) + WARMUP=0 + shift + ;; + --warmup-size) + WARMUP_SIZE="$2" + shift 2 + ;; + --warmup-size=*) + WARMUP_SIZE="${1#*=}" + shift + ;; + -r|--runtime) + RUNTIME="$2" + shift 2 + ;; + --runtime=*) + RUNTIME="${1#*=}" + shift + ;; + -l|--loops) + LOOPS="$2" + shift 2 + ;; + --loops=*) + LOOPS="${1#*=}" + shift + ;; + -n|--dry-run) + DRY_RUN=1 + shift + ;; + -f|--format) + FORMAT="$2" + shift 2 + ;; + --format=*) + FORMAT="${1#*=}" + shift + ;; + -u|--no-update-check) + UPDATE_CHECK=0 + shift + ;; + --color) + COLOR=1 + shift + ;; + --no-color) + COLOR=0 + shift + ;; + --emoji) + EMOJI=1 + shift + ;; + --no-emoji) + EMOJI=0 + shift + ;; + -*) + echo "Error: Unknown option: $1" >&2 + echo "Use --help for usage information." >&2 + exit 1 + ;; + *) + echo "Error: Unexpected argument: $1" >&2 + echo "Use --help for usage information." 
>&2 + exit 1 + ;; + esac + done +} diff --git a/lib/benchmark.sh b/lib/benchmark.sh new file mode 100644 index 0000000..8dfd67f --- /dev/null +++ b/lib/benchmark.sh @@ -0,0 +1,191 @@ +#!/bin/bash +# benchmark.sh - Benchmark execution functions +# Provides: warmup, fio_benchmark, run_benchmarks + +prepare_benchmark_params() { + SIZE="${SIZE:-1G}" + BYTESIZE=$(toBytes $SIZE) + WARMUP="${WARMUP:-1}" + + if [ -z "$WARMUP_SIZE" ]; then + case "$PROFILE" in + *nvme*) WARMUP_SIZE="64M" ;; + *) WARMUP_SIZE="8M" ;; + esac + fi + + validate_size_string "$WARMUP_SIZE" "WARMUP_SIZE" + WARMUP_BLOCK_BYTES=$(toBytes $WARMUP_SIZE) + if [ -z "$WARMUP_BLOCK_BYTES" ] || [ "$WARMUP_BLOCK_BYTES" -le 0 ]; then + WARMUP_BLOCK_BYTES=$(toBytes 8M) + WARMUP_SIZE="8M" + fi + BLOCK_MB=$((WARMUP_BLOCK_BYTES / 1024 / 1024)) + [ "$BLOCK_MB" -lt 1 ] && BLOCK_MB=1 + [ "$BLOCK_MB" -gt 1024 ] && BLOCK_MB=1024 + + if [[ -n "$LOOPS" ]] && [[ -n "$RUNTIME" ]]; then + LIMIT="Loops: $LOOPS (max $RUNTIME each)" + LIMIT_OPTION="--loops=$LOOPS --runtime=$RUNTIME" + elif [[ -n "$LOOPS" ]]; then + LIMIT="Loops: $LOOPS" + LIMIT_OPTION="--loops=$LOOPS" + else + RUNTIME="${RUNTIME:-5s}" + LIMIT="Runtime: $RUNTIME" + LIMIT_OPTION="--time_based --runtime=$RUNTIME" + fi +} + +run_warmup() { + if [ $WARMUP -eq 1 ]; then + if [ $WRITEZERO -eq 1 ]; then + FILESOURCE=/dev/zero + else + FILESOURCE=/dev/urandom + fi + TOTAL_MB=$((BYTESIZE / 1024 / 1024)) + if [ "$TOTAL_MB" -eq 0 ]; then + dd if="$FILESOURCE" of="$TARGET/.diskmark.tmp" bs="$BYTESIZE" count=1 oflag=direct status=none + else + CHUNKS=$((TOTAL_MB / BLOCK_MB)) + REMAINDER_MB=$((TOTAL_MB % BLOCK_MB)) + if [ $CHUNKS -gt 0 ]; then + dd if="$FILESOURCE" of="$TARGET/.diskmark.tmp" bs=${BLOCK_MB}M count=$CHUNKS oflag=direct status=none + fi + if [ $REMAINDER_MB -gt 0 ]; then + dd if="$FILESOURCE" of="$TARGET/.diskmark.tmp" bs=1M count=$REMAINDER_MB oflag=direct conv=notrunc seek=$((CHUNKS * BLOCK_MB)) status=none + fi + fi + fi +} + +fio_benchmark() { + fio --filename="$TARGET/.diskmark.tmp" \ + --stonewall --ioengine=libaio --direct=$DIRECT --zero_buffers=$WRITEZERO \ + $LIMIT_OPTION --size="$1" \ + --name="$2" --blocksize="$3" --iodepth="$4" --numjobs="$5" --readwrite="$6" \ + --output-format=json >"$TARGET/.diskmark.json" +} + +parse_result_raw() { + local bandwidth=$(cat "$TARGET/.diskmark.json" | grep -A"$2" '"name" : "'"$1"'"' | grep "$3" | sed 's/ "'"$3"'" : //g' | sed 's:,::g' | awk '{ SUM += $1} END { printf "%.6f", SUM / 1024 / 1024 }') + local throughput=$(cat "$TARGET/.diskmark.json" | grep -A"$2" '"name" : "'"$1"'"' | grep "$4" | sed 's/ "'"$4"'" : //g' | sed 's:,::g' | awk '{ SUM += $1} END { printf "%.6f", SUM }') + echo "$bandwidth $throughput" +} + +parse_latency() { + local job_name="$1" + local operation="$2" + local lat_ns=$(cat "$TARGET/.diskmark.json" | \ + grep -A200 '"name" : "'"$job_name"'"' | \ + grep -A50 "\"$operation\" :" | \ + grep -A5 '"clat_ns"' | \ + grep '"mean"' | head -1 | sed 's/.*: //g' | sed 's:,::g') + if [[ -n "$lat_ns" ]]; then + echo "$lat_ns" | awk '{printf "%.6f", $1 / 1000000}' + else + echo "0" + fi +} + +format_latency() { + local lat_ms="$1" + if [[ -z "$lat_ms" ]] || [[ "$lat_ms" == "0" ]]; then + echo "0.00ms" + elif awk "BEGIN {exit !($lat_ms >= 0.01)}"; then + printf "%.2fms" "$lat_ms" + elif awk "BEGIN {exit !($lat_ms >= 0.001)}"; then + printf "%.3fms" "$lat_ms" + else + local lat_ns=$(awk "BEGIN {printf \"%.0f\", $lat_ms * 1000000}") + echo "${lat_ns}ns" + fi +} + +clear_progress() { + if [[ -z "$FORMAT" ]]; then + printf "\r\033[K" + 
fi +} + +show_progress() { + if [[ -z "$FORMAT" ]]; then + if [[ "$2" == *"read"* ]]; then + printf "\n[%d/%d] %s..." "$1" "$TOTAL_JOBS" "$2" + else + printf "\r[%d/%d] %s..." "$1" "$TOTAL_JOBS" "$2" + fi + fi +} + +run_all_benchmarks() { + TOTAL_JOBS=${#NAME[@]} + RESULTS_NAME=() + RESULTS_STATUS=() + RESULTS_READ_BW=() + RESULTS_READ_IOPS=() + RESULTS_READ_LAT=() + RESULTS_WRITE_BW=() + RESULTS_WRITE_IOPS=() + RESULTS_WRITE_LAT=() + SKIPPED_JOBS=() + + for ((i = 0; i < ${#NAME[@]}; i++)); do + JOB_NUM=$((i + 1)) + DIVIDER=${SIZEDIVIDER[$i]:-1} + if [ "$DIVIDER" -le 0 ]; then + TESTSIZE=$BYTESIZE + else + TESTSIZE=$((BYTESIZE / DIVIDER)) + fi + BLOCKSIZE_BYTES=$(toBytes "${BLOCKSIZE[$i]}") + + if [ "$TESTSIZE" -lt "$BLOCKSIZE_BYTES" ]; then + SKIPPED_JOBS+=("${NAME[$i]} (size $(fromBytes $TESTSIZE) < block size ${BLOCKSIZE[$i]})") + RESULTS_NAME+=("${NAME[$i]}") + RESULTS_STATUS+=("skipped") + RESULTS_READ_BW+=(0) + RESULTS_READ_IOPS+=(0) + RESULTS_READ_LAT+=(0) + RESULTS_WRITE_BW+=(0) + RESULTS_WRITE_IOPS+=(0) + RESULTS_WRITE_LAT+=(0) + if [[ -z "$FORMAT" ]]; then + echo + echo -e "${JOBCOLOR[$i]}${LABEL[$i]}:$(color $RESET) Skipped" + fi + continue + fi + + show_progress "$JOB_NUM" "${NAME[$i]} read" + fio_benchmark "$TESTSIZE" "${NAME[$i]}Read" "${BLOCKSIZE[$i]}" "${IODEPTH[$i]}" "${NUMJOBS[$i]}" "${READWRITE[$i]}read" + READ_RAW=$(parse_result_raw "${NAME[$i]}Read" 15 bw_bytes iops) + READ_BW=$(echo "$READ_RAW" | awk '{print $1}') + READ_IOPS=$(echo "$READ_RAW" | awk '{print $2}') + READ_LAT=$(parse_latency "${NAME[$i]}Read" "read") + + show_progress "$JOB_NUM" "${NAME[$i]} write" + fio_benchmark "$TESTSIZE" "${NAME[$i]}Write" "${BLOCKSIZE[$i]}" "${IODEPTH[$i]}" "${NUMJOBS[$i]}" "${READWRITE[$i]}write" + WRITE_RAW=$(parse_result_raw "${NAME[$i]}Write" 80 bw_bytes iops) + WRITE_BW=$(echo "$WRITE_RAW" | awk '{print $1}') + WRITE_IOPS=$(echo "$WRITE_RAW" | awk '{print $2}') + WRITE_LAT=$(parse_latency "${NAME[$i]}Write" "write") + + if [[ -z "$FORMAT" ]]; then + clear_progress + echo -e "${JOBCOLOR[$i]}${LABEL[$i]}:$(color $RESET)" + printf "<= Read: %.0f MB/s, %.0f IO/s, %s\n" "$READ_BW" "$READ_IOPS" "$(format_latency $READ_LAT)" + printf "=> Write: %.0f MB/s, %.0f IO/s, %s\n" "$WRITE_BW" "$WRITE_IOPS" "$(format_latency $WRITE_LAT)" + fi + + RESULTS_NAME+=("${NAME[$i]}") + RESULTS_STATUS+=("success") + RESULTS_READ_BW+=($READ_BW) + RESULTS_READ_IOPS+=($READ_IOPS) + RESULTS_READ_LAT+=($READ_LAT) + RESULTS_WRITE_BW+=($WRITE_BW) + RESULTS_WRITE_IOPS+=($WRITE_IOPS) + RESULTS_WRITE_LAT+=($WRITE_LAT) + done +} diff --git a/lib/detect.sh b/lib/detect.sh new file mode 100644 index 0000000..e168ec9 --- /dev/null +++ b/lib/detect.sh @@ -0,0 +1,99 @@ +#!/bin/bash +# detect.sh - Drive and filesystem detection +# Provides: detect_filesystem, detect_drive + +detect_filesystem() { + FILESYSTEMPARTITION="" + if command -v lsblk &> /dev/null; then + FILESYSTEMPARTITION=$(lsblk -P 2>/dev/null | grep "$TARGET" | head -n 1 | awk '{print $1}' | cut -d"=" -f2 | cut -d"\"" -f2) + fi + if [ -z "$FILESYSTEMPARTITION" ] && command -v findmnt &> /dev/null; then + FILESYSTEMPARTITION=$(findmnt -n -o SOURCE "$TARGET" 2>/dev/null | sed 's|/dev/||') + fi + if [ -z "$FILESYSTEMPARTITION" ]; then + FILESYSTEMPARTITION=$(df "$TARGET" 2>/dev/null | tail +2 | awk '{print $1}' | sed 's|/dev/||') + fi + + FILESYSTEMTYPE=$(df -T "$TARGET" | tail +2 | awk '{print $2}') + FILESYSTEMSIZE=$(df -Th "$TARGET" | tail +2 | awk '{print $3}') +} + +detect_drive_type() { + ISOVERLAY=0 + ISTMPFS=0 + ISNVME=0 + ISEMMC=0 + ISMDADM=0 + 
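+    # Note: the branches below map a partition name back to its parent block
+    # device: "mmcblk0p1" and "nvme0n1p1" drop their trailing two characters
+    # via rev|cut|rev, "sda1"/"hda1"/"vda1" drop trailing digits via sed, and
+    # md devices are used as-is.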
DRIVE="" + DRIVEDETAILS="" + + if [[ "$FILESYSTEMTYPE" == overlay ]]; then + ISOVERLAY=1 + elif [[ "$FILESYSTEMTYPE" == tmpfs ]]; then + ISTMPFS=1 + elif [[ "$FILESYSTEMPARTITION" == mmcblk* ]]; then + DRIVE=$(echo $FILESYSTEMPARTITION | rev | cut -c 3- | rev) + ISEMMC=1 + elif [[ "$FILESYSTEMPARTITION" == nvme* ]]; then + DRIVE=$(echo $FILESYSTEMPARTITION | rev | cut -c 3- | rev) + ISNVME=1 + elif [[ "$FILESYSTEMPARTITION" == hd* ]] || [[ "$FILESYSTEMPARTITION" == sd* ]] || [[ "$FILESYSTEMPARTITION" == vd* ]]; then + DRIVE=$(echo $FILESYSTEMPARTITION | sed 's/[0-9]*$//') + elif [[ "$FILESYSTEMPARTITION" == md* ]]; then + DRIVE=$FILESYSTEMPARTITION + ISMDADM=1 + fi +} + +detect_drive_info() { + DRIVELABEL="Drive" + DRIVENAME="Unknown" + DRIVESIZE="Unknown" + + if [ $ISOVERLAY -eq 1 ]; then + DRIVENAME="Overlay" + DRIVE="overlay" + DRIVESIZE=$FILESYSTEMSIZE + elif [ $ISTMPFS -eq 1 ]; then + DRIVENAME="RAM" + DRIVE="tmpfs" + DRIVESIZE=$(free -h --si | grep Mem: | awk '{print $2}') + elif [ $ISEMMC -eq 1 ]; then + DEVICE=() + if [ -f /sys/block/$DRIVE/device/type ]; then + case "$(cat /sys/block/$DRIVE/device/type)" in + SD) DEVICE+=("SD Card");; + *) DEVICE+=();; + esac + fi + [ -f /sys/block/$DRIVE/device/name ] && DEVICE+=($(cat /sys/block/$DRIVE/device/name | sed 's/ *$//g')) + DRIVENAME=${DEVICE[@]:-"eMMC flash storage"} + DRIVESIZE=$(fromBytes $(($(cat /sys/block/$DRIVE/size) * 512))) + elif [ $ISMDADM -eq 1 ]; then + DRIVELABEL="Drives" + DRIVENAME="mdadm $(cat /sys/block/$DRIVE/md/level)" + DRIVESIZE=$(fromBytes $(($(cat /sys/block/$DRIVE/size) * 512))) + DISKS=$(ls /sys/block/$DRIVE/slaves/) + DRIVEDETAILS="using $(echo $DISKS | wc -w) disks ($(echo $DISKS | sed 's/ /, /g'))" + elif [ -n "$DRIVE" ] && [ -d /sys/block/$DRIVE/device ]; then + DEVICE=() + [ -f /sys/block/$DRIVE/device/vendor ] && DEVICE+=($(cat /sys/block/$DRIVE/device/vendor | sed 's/ *$//g')) + [ -f /sys/block/$DRIVE/device/model ] && DEVICE+=($(cat /sys/block/$DRIVE/device/model | sed 's/ *$//g')) + DRIVENAME=${DEVICE[@]:-"Unknown drive"} + DRIVESIZE=$(fromBytes $(($(cat /sys/block/$DRIVE/size) * 512))) + else + DRIVE="Unknown" + fi + + if [ "$DRIVE" = "Unknown" ]; then + DRIVEINFO="Unknown" + else + DRIVEINFO="$DRIVENAME ($DRIVE, $DRIVESIZE) $DRIVEDETAILS" + fi +} + +detect_all() { + detect_filesystem + detect_drive_type + detect_drive_info +} diff --git a/lib/output.sh b/lib/output.sh new file mode 100644 index 0000000..f48359d --- /dev/null +++ b/lib/output.sh @@ -0,0 +1,164 @@ +#!/bin/bash +# output.sh - Output formatting functions +# Provides: output_* functions for all output formats + +escape_json() { + echo "$1" | sed 's/\\/\\\\/g' | sed 's/"/\\"/g' +} + +output_config_human() { + echo -e "$(color $BOLD $WHITE)Configuration:$(color $RESET) +- Target: $TARGET + - $DRIVELABEL: $DRIVEINFO + - Filesystem: $FILESYSTEMTYPE ($FILESYSTEMPARTITION, $FILESYSTEMSIZE) +- Profile: $PROFILE + - I/O: $IO + - Data: $DATA + - Size: $SIZE + - Warmup: $WARMUP$([ "$WARMUP" -eq 1 ] && echo " (block: ${BLOCK_MB}M)") + - $LIMIT +" +} + +output_running_message() { + if [[ -z "$FORMAT" ]]; then + echo -e "The benchmark is $(color $BOLD $WHITE)running$(color $RESET), please wait..." + fi +} + +output_dry_run_success() { + echo -e "$SYM_SUCCESS Dry run $(color $BOLD $GREEN)completed$(color $RESET). Configuration is valid." 
+} + +output_results_human() { + local total=${#RESULTS_NAME[@]} + local has_skipped=0 + for ((j = 0; j < total; j++)); do + [[ "${RESULTS_STATUS[$j]}" == "skipped" ]] && has_skipped=1 && break + done + if [ $has_skipped -eq 1 ]; then + echo -e "\n$SYM_SUCCESS The benchmark is $(color $BOLD $GREEN)finished$(color $RESET) with $(color $BOLD $YELLOW)warnings$(color $RESET):" + for job in "${SKIPPED_JOBS[@]}"; do + echo -e " - $job" + done + else + echo -e "\n$SYM_SUCCESS The benchmark is $(color $BOLD $GREEN)finished$(color $RESET)." + fi +} + +output_results_json() { + local total=${#RESULTS_NAME[@]} + echo "{" + echo " \"configuration\": {" + echo " \"target\": \"$(escape_json "$TARGET")\"," + echo " \"drive\": {" + echo " \"label\": \"$(escape_json "$DRIVELABEL")\"," + echo " \"info\": \"$(escape_json "$DRIVEINFO")\"" + echo " }," + echo " \"filesystem\": {" + echo " \"type\": \"$(escape_json "$FILESYSTEMTYPE")\"," + echo " \"partition\": \"$(escape_json "$FILESYSTEMPARTITION")\"," + echo " \"size\": \"$(escape_json "$FILESYSTEMSIZE")\"" + echo " }," + echo " \"profile\": \"$(escape_json "$PROFILE")\"," + echo " \"io\": \"$(escape_json "$IO")\"," + echo " \"data\": \"$(escape_json "$DATA")\"," + echo " \"size\": \"$SIZE\"," + echo " \"warmup\": $WARMUP," + if [[ -n "$LOOPS" ]]; then + echo " \"loops\": $LOOPS" + else + echo " \"runtime\": \"$RUNTIME\"" + fi + echo " }," + echo " \"results\": [" + for ((j = 0; j < total; j++)); do + echo -n " {\"name\": \"$(escape_json "${RESULTS_NAME[$j]}")\", \"status\": \"${RESULTS_STATUS[$j]}\"" + if [[ "${RESULTS_STATUS[$j]}" != "skipped" ]]; then + echo -n ", \"read\": {\"bandwidth_mb\": ${RESULTS_READ_BW[$j]}, \"iops\": ${RESULTS_READ_IOPS[$j]}, \"latency_ms\": ${RESULTS_READ_LAT[$j]}}, \"write\": {\"bandwidth_mb\": ${RESULTS_WRITE_BW[$j]}, \"iops\": ${RESULTS_WRITE_IOPS[$j]}, \"latency_ms\": ${RESULTS_WRITE_LAT[$j]}}" + fi + echo -n "}" + [[ $j -lt $((total - 1)) ]] && echo "," || echo + done + echo " ]" + echo "}" +} + +output_results_yaml() { + local total=${#RESULTS_NAME[@]} + echo "configuration:" + echo " target: \"$(escape_json "$TARGET")\"" + echo " drive:" + echo " label: \"$(escape_json "$DRIVELABEL")\"" + echo " info: \"$(escape_json "$DRIVEINFO")\"" + echo " filesystem:" + echo " type: \"$(escape_json "$FILESYSTEMTYPE")\"" + echo " partition: \"$(escape_json "$FILESYSTEMPARTITION")\"" + echo " size: \"$(escape_json "$FILESYSTEMSIZE")\"" + echo " profile: \"$(escape_json "$PROFILE")\"" + echo " io: \"$(escape_json "$IO")\"" + echo " data: \"$(escape_json "$DATA")\"" + echo " size: \"$SIZE\"" + echo " warmup: $WARMUP" + if [[ -n "$LOOPS" ]]; then + echo " loops: $LOOPS" + else + echo " runtime: \"$RUNTIME\"" + fi + echo "results:" + for ((j = 0; j < total; j++)); do + echo " - name: \"$(escape_json "${RESULTS_NAME[$j]}")\"" + echo " status: \"${RESULTS_STATUS[$j]}\"" + if [[ "${RESULTS_STATUS[$j]}" != "skipped" ]]; then + echo " read:" + echo " bandwidth_mb: ${RESULTS_READ_BW[$j]}" + echo " iops: ${RESULTS_READ_IOPS[$j]}" + echo " latency_ms: ${RESULTS_READ_LAT[$j]}" + echo " write:" + echo " bandwidth_mb: ${RESULTS_WRITE_BW[$j]}" + echo " iops: ${RESULTS_WRITE_IOPS[$j]}" + echo " latency_ms: ${RESULTS_WRITE_LAT[$j]}" + fi + done +} + +output_results_xml() { + local total=${#RESULTS_NAME[@]} + echo '' + echo "" + echo " " + echo " $TARGET" + echo " $DRIVEINFO" + echo " " + echo " $PROFILE" + echo " $IO" + echo " $DATA" + echo " $SIZE" + echo " $WARMUP" + if [[ -n "$LOOPS" ]]; then + echo " $LOOPS" + else + echo " $RUNTIME" + fi + echo " " 
+ echo " " + for ((j = 0; j < total; j++)); do + echo " " + if [[ "${RESULTS_STATUS[$j]}" != "skipped" ]]; then + echo " " + echo " " + fi + echo " " + done + echo " " + echo "" +} + +output_results() { + case "$FORMAT" in + "") output_results_human ;; + json) output_results_json ;; + yaml) output_results_yaml ;; + xml) output_results_xml ;; + esac +} diff --git a/lib/profiles.sh b/lib/profiles.sh new file mode 100644 index 0000000..e7acdb9 --- /dev/null +++ b/lib/profiles.sh @@ -0,0 +1,79 @@ +#!/bin/bash +# profiles.sh - Benchmark profile definitions +# Provides: load_default_profile, load_nvme_profile, load_job, select_profile + +load_default_profile() { + NAME=("SEQ1MQ8T1" "SEQ1MQ1T1" "RND4KQ32T1" "RND4KQ1T1") + LABEL=("Sequential 1M Q8T1" "Sequential 1M Q1T1" "Random 4K Q32T1" "Random 4K Q1T1") + JOBCOLOR=($(color $NORMAL $YELLOW) $(color $NORMAL $YELLOW) $(color $NORMAL $CYAN) $(color $NORMAL $CYAN)) + BLOCKSIZE=("1M" "1M" "4K" "4K") + IODEPTH=(8 1 32 1) + NUMJOBS=(1 1 1 1) + READWRITE=("" "" "rand" "rand") + SIZEDIVIDER=(-1 -1 16 32) +} + +load_nvme_profile() { + NAME=("SEQ1MQ8T1" "SEQ128KQ32T1" "RND4KQ32T16" "RND4KQ1T1") + LABEL=("Sequential 1M Q8T1" "Sequential 128K Q32T1" "Random 4K Q32T16" "Random 4K Q1T1") + JOBCOLOR=($(color $NORMAL $YELLOW) $(color $NORMAL $GREEN) $(color $NORMAL $CYAN) $(color $NORMAL $CYAN)) + BLOCKSIZE=("1M" "128K" "4K" "4K") + IODEPTH=(8 32 32 1) + NUMJOBS=(1 1 16 1) + READWRITE=("" "" "rand" "rand") + SIZEDIVIDER=(-1 -1 16 32) +} + +load_job() { + PARAMS=($(echo "$JOB" | perl -nle '/^(RND|SEQ)([0-9]+[KM])Q([0-9]+)T([0-9]+)$/; print "$1 $2 $3 $4"')) + if [ -z ${PARAMS[0]} ]; then + error 1 "Invalid job name: $(color $BOLD $WHITE)$JOB$(color $RESET)" + fi + + case "${PARAMS[0]}" in + RND) + READWRITE=("rand") + READWRITELABEL="Random" + ;; + SEQ) + READWRITE=("") + READWRITELABEL="Sequential" + ;; + esac + BLOCKSIZE=(${PARAMS[1]}) + IODEPTH=(${PARAMS[2]}) + NUMJOBS=(${PARAMS[3]}) + + NAME=($JOB) + LABEL="$READWRITELABEL $BLOCKSIZE Q${IODEPTH}T${NUMJOBS}" + JOBCOLOR=($(color $NORMAL $MAGENTA)) + SIZEDIVIDER=(-1) +} + +select_profile() { + if [ ! -z $JOB ]; then + PROFILE="Job \"$JOB\"" + load_job + else + case "$PROFILE" in + ""|auto) + if [ $ISNVME -eq 1 ]; then + PROFILE="auto (nvme)" + load_nvme_profile + else + PROFILE="auto (default)" + load_default_profile + fi + ;; + default) + load_default_profile + ;; + nvme) + load_nvme_profile + ;; + *) + error 1 "Invalid PROFILE: $(color $BOLD $WHITE)$PROFILE$(color $RESET). Allowed values are 'auto', 'default', or 'nvme'." 
+                ;;
+        esac
+    fi
+}
diff --git a/lib/update.sh b/lib/update.sh
new file mode 100644
index 0000000..2458114
--- /dev/null
+++ b/lib/update.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+# update.sh - Update check functionality
+# Provides: check_for_updates
+
+is_semver() {
+    [[ "$1" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+(-[a-zA-Z0-9.]+)?$ ]]
+}
+
+check_for_updates() {
+    if [[ "$UPDATE_CHECK" -eq 1 ]] && [[ -f "$VERSION_FILE" ]]; then
+        CURRENT_VERSION="$SCRIPT_VERSION"
+        LATEST_VERSION=$(wget --no-check-certificate -qO- https://api.github.com/repos/e7db/docker-diskmark/releases/latest 2>/dev/null | grep '"tag_name"' | cut -d'"' -f4 || true)
+        if [[ "$CURRENT_VERSION" != "unknown" ]] && is_semver "$CURRENT_VERSION" && is_semver "$LATEST_VERSION" && [[ "$CURRENT_VERSION" != "$LATEST_VERSION" ]]; then
+            echo -e "Update available: \e[1;37m$CURRENT_VERSION\e[0m => \e[1;37m$LATEST_VERSION\e[0m (docker pull e7db/diskmark:latest)\n"
+        fi
+    fi
+}
diff --git a/lib/utils.sh b/lib/utils.sh
new file mode 100644
index 0000000..8366b61
--- /dev/null
+++ b/lib/utils.sh
@@ -0,0 +1,153 @@
+#!/bin/bash
+# utils.sh - Utility functions for diskmark
+# Provides: color output, size conversions, cleanup, error handling
+
+RESET="0m"
+NORMAL="0"
+BOLD="1"
+BLACK=";30m"
+RED=";31m"
+GREEN=";32m"
+YELLOW=";33m"
+BLUE=";34m"
+MAGENTA=";35m"
+CYAN=";36m"
+WHITE=";37m"
+
+detect_color_support() {
+    if [[ "$TERM" == "dumb" ]]; then
+        echo 0
+    else
+        echo 1
+    fi
+}
+
+detect_emoji_support() {
+    if [[ "$TERM" == "dumb" ]]; then
+        echo 0
+    else
+        echo 1
+    fi
+}
+
+init_display_settings() {
+    if [[ -z "$COLOR" ]]; then
+        COLOR=$(detect_color_support)
+    elif [[ ! "$COLOR" =~ ^[01]$ ]]; then
+        echo "Error: COLOR must be either 0 or 1." >&2
+        exit 1
+    fi
+
+    if [[ -z "$EMOJI" ]]; then
+        EMOJI=$(detect_emoji_support)
+    elif [[ ! "$EMOJI" =~ ^[01]$ ]]; then
+        echo "Error: EMOJI must be either 0 or 1." >&2
+        exit 1
+    fi
+
+    if [[ "$EMOJI" -eq 1 ]]; then
+        SYM_SUCCESS="✅"
+        SYM_FAILURE="❌"
+        SYM_STOP="🛑"
+    else
+        SYM_SUCCESS="[OK]"
+        SYM_FAILURE="[FAIL]"
+        SYM_STOP="[STOP]"
+    fi
+}
+
+color() {
+    if [[ "$COLOR" -eq 1 ]]; then
+        echo "\e[$1$2"
+    else
+        echo ""
+    fi
+}
+
+toBytes() {
+    local SIZE=$1
+    local UNIT=${SIZE//[0-9]/}
+    local NUMBER=${SIZE//[a-zA-Z]/}
+    case "$UNIT" in
+        P|p) echo $((NUMBER * 1024 * 1024 * 1024 * 1024 * 1024));;
+        T|t) echo $((NUMBER * 1024 * 1024 * 1024 * 1024));;
+        G|g) echo $((NUMBER * 1024 * 1024 * 1024));;
+        M|m) echo $((NUMBER * 1024 * 1024));;
+        K|k) echo $((NUMBER * 1024));;
+        *) echo "$NUMBER";;
+    esac
+}
+
+fromBytes() {
+    local SIZE=$1
+    local UNIT=""
+    if (( SIZE > 1024 )); then
+        SIZE=$((SIZE / 1024))
+        UNIT="K"
+    fi
+    if (( SIZE > 1024 )); then
+        SIZE=$((SIZE / 1024))
+        UNIT="M"
+    fi
+    if (( SIZE > 1024 )); then
+        SIZE=$((SIZE / 1024))
+        UNIT="G"
+    fi
+    if (( SIZE > 1024 )); then
+        SIZE=$((SIZE / 1024))
+        UNIT="T"
+    fi
+    if (( SIZE > 1024 )); then
+        SIZE=$((SIZE / 1024))
+        UNIT="P"
+    fi
+    echo "${SIZE}${UNIT}"
+}
+
+clean() {
+    [[ -z "$TARGET" ]] && return
+    if [[ -n "$ISNEWDIR" ]]; then
+        rm -rf "$TARGET"
+    else
+        rm -f "$TARGET"/.diskmark.{json,tmp}
+    fi
+}
+
+interrupt() {
+    local EXIT_CODE="${1:-0}"
+    echo -e "\r\n\n$SYM_STOP The benchmark was $(color $BOLD $RED)interrupted$(color $RESET)."
+    if [[ -n "$2" ]]; then
+        echo -e "$2"
+    fi
+    clean
+    exit "${EXIT_CODE}"
+}
+
+fail() {
+    local EXIT_CODE="${1:-1}"
+    echo -e "\r\n\n$SYM_FAILURE The benchmark $(color $BOLD $RED)failed$(color $RESET)."
+    if [[ -n "$2" ]]; then
+        echo -e "$2"
+    fi
+    clean
+    exit "${EXIT_CODE}"
+}
+
+error() {
+    local EXIT_CODE="${1:-1}"
+    echo -e "\r\n$SYM_FAILURE The benchmark encountered an $(color $BOLD $RED)error$(color $RESET)."
+    if [[ -n "$2" ]]; then
+        echo -e "$2"
+    fi
+    clean
+    exit "${EXIT_CODE}"
+}
+
+setup_traps() {
+    trap 'interrupt $? "The benchmark was aborted before its completion."' HUP INT QUIT TERM
+    trap 'fail $? "The benchmark failed before its completion."' ERR
+}
+
+require_command() {
+    command -v "$1" >/dev/null 2>&1 || fail 1 "Missing required dependency: $(color $BOLD $WHITE)$1$(color $RESET). Please install it and try again."
+}
diff --git a/lib/validate.sh b/lib/validate.sh
new file mode 100644
index 0000000..7d3f9ed
--- /dev/null
+++ b/lib/validate.sh
@@ -0,0 +1,137 @@
+#!/bin/bash
+# validate.sh - Input validation functions
+# Provides: validate_* functions for all input parameters
+
+validate_size_string() {
+    local VALUE="$1"
+    local LABEL="$2"
+    if [[ -z "$VALUE" ]]; then
+        error 1 "$LABEL must be provided."
+    fi
+    if [[ ! "$VALUE" =~ ^[0-9]+([KkMmGgTtPp])?$ ]]; then
+        error 1 "$LABEL must be a positive integer optionally followed by K, M, G, T, or P (example: 1G)."
+    fi
+    local BYTES=$(toBytes "$VALUE")
+    if [[ -z "$BYTES" || "$BYTES" -le 0 ]]; then
+        error 1 "$LABEL must be greater than zero."
+    fi
+}
+
+validate_binary_flag() {
+    local VALUE="$1"
+    local LABEL="$2"
+    if [[ ! "$VALUE" =~ ^[01]$ ]]; then
+        error 1 "$LABEL must be either 0 or 1."
+    fi
+}
+
+validate_runtime() {
+    local VALUE="$1"
+    if [[ -z "$VALUE" ]]; then
+        return 0
+    fi
+    if [[ ! "$VALUE" =~ ^[0-9]+(ms|s|m|h)$ ]]; then
+        error 1 "RUNTIME must match the fio time format (e.g., 500ms, 5s, 2m, 1h)."
+    fi
+}
+
+validate_integer() {
+    local VALUE="$1"
+    local LABEL="$2"
+    local ALLOW_ZERO="${3:-0}"
+    local REGEX='^[1-9][0-9]*$'
+    local ERROR_MSG="$LABEL must be a positive integer."
+
+    if [[ "$ALLOW_ZERO" -eq 1 ]]; then
+        REGEX='^[0-9]+$'
+        ERROR_MSG="$LABEL must be a non-negative integer."
+    fi
+
+    if [[ -z "$VALUE" ]]; then
+        error 1 "$LABEL must be provided."
+    fi
+    if [[ ! "$VALUE" =~ $REGEX ]]; then
+        error 1 "$ERROR_MSG"
+    fi
+}
+
+ensure_writable_target() {
+    local PATH_TO_CHECK="$1"
+    if [[ "$PATH_TO_CHECK" == "/" ]]; then
+        error 1 "Refusing to run against the filesystem root. Please set TARGET to a dedicated directory."
+    fi
+    if [[ -d "$PATH_TO_CHECK" && ! -w "$PATH_TO_CHECK" ]]; then
+        error 1 "TARGET directory is not writable: $PATH_TO_CHECK"
+    fi
+}
+
+validate_format() {
+    FORMAT="${FORMAT:-}"
+    if [[ -n "$FORMAT" && ! "$FORMAT" =~ ^(json|yaml|xml)$ ]]; then
+        echo "Error: FORMAT must be empty or one of: json, yaml, xml." >&2
+        exit 1
+    fi
+    # Machine-readable formats disable display features
+    if [[ -n "$FORMAT" ]]; then
+        COLOR=0
+        EMOJI=0
+        UPDATE_CHECK=0
+    fi
+}
+
+validate_update_check() {
+    UPDATE_CHECK="${UPDATE_CHECK:-1}"
+    if [[ ! "$UPDATE_CHECK" =~ ^[01]$ ]]; then
+        echo "Error: UPDATE_CHECK must be either 0 or 1." >&2
+        exit 1
+    fi
+}
+
+validate_io_mode() {
+    case "$IO" in
+        ""|direct)
+            IO="direct (synchronous)"
+            DIRECT=1
+            ;;
+        buffered)
+            IO="buffered (asynchronous)"
+            DIRECT=0
+            ;;
+        *)
+            error 1 "Invalid IO mode: $(color $BOLD $WHITE)$IO$(color $RESET). Allowed values are 'direct' or 'buffered'."
+            ;;
+    esac
+}
+
+validate_data_pattern() {
+    case "$DATA" in
+        ""|random|rand)
+            DATA="random"
+            WRITEZERO=0
+            ;;
+        zero|0|0x00)
+            DATA="zero (0x00)"
+            WRITEZERO=1
+            ;;
+        *)
+            error 1 "Invalid DATA pattern: $(color $BOLD $WHITE)$DATA$(color $RESET). Allowed values are 'random' or 'zero'."
+            ;;
+    esac
+}
+
+validate_all_inputs() {
+    validate_update_check
+    validate_format
+    validate_size_string "${SIZE:-1G}" "SIZE"
+    validate_binary_flag "${WARMUP:-1}" "WARMUP"
+    validate_binary_flag "${DRY_RUN:-0}" "DRY_RUN"
+    if [[ -n "$WARMUP_SIZE" ]]; then
+        validate_size_string "$WARMUP_SIZE" "WARMUP_SIZE"
+    fi
+    if [[ -n "$LOOPS" ]]; then
+        validate_integer "$LOOPS" "LOOPS"
+    fi
+    if [[ -n "$RUNTIME" ]]; then
+        validate_runtime "$RUNTIME"
+    fi
+}
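
For reviewers, a minimal sketch of how these libraries could be wired together from an entry point. This is an illustration under assumptions: `diskmark.sh` itself is not part of this diff, so the sourcing loop, the call order, the placeholder for the fio run, and the `ISNVME`/`TARGET` handling below are guesses rather than the actual entry-point code.

```bash
#!/bin/bash
# Hypothetical wiring sketch only -- diskmark.sh is not shown in this diff,
# so the call order and the ISNVME/TARGET handling here are assumptions.
set -e

LIB_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/lib"
for lib in "$LIB_DIR"/*.sh; do
    # shellcheck source=/dev/null
    . "$lib"
done

init_display_settings                # resolve COLOR/EMOJI and pick emoji vs ASCII symbols
setup_traps                          # HUP/INT/QUIT/TERM -> interrupt(), ERR -> fail()
require_command fio                  # abort early if the benchmark binary is missing

validate_all_inputs                  # UPDATE_CHECK, FORMAT, SIZE, WARMUP, DRY_RUN, WARMUP_SIZE, LOOPS, RUNTIME
validate_io_mode                     # normalizes IO and sets DIRECT
validate_data_pattern                # normalizes DATA and sets WRITEZERO
ensure_writable_target "${TARGET:-/disk}"

check_for_updates                    # prints a hint when a newer release tag exists

ISNVME="${ISNVME:-0}"                # normally produced by the drive-detection step (assumed here)
select_profile                       # fills NAME/LABEL/BLOCKSIZE/IODEPTH/NUMJOBS/READWRITE/SIZEDIVIDER

# ...run the fio jobs and collect the RESULTS_* data here (not shown in this diff)...

output_results                       # human, json, yaml, or xml depending on FORMAT
```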