diff --git a/.gitignore b/.gitignore index 90cd4453..55f2e665 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,7 @@ outputs/ trt_cache/ # Dataset dataset_collections/ +checkpoints/ # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/README.md b/README.md index ee5d1b66..aadba3e2 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,44 @@

DEIMKit is a Python wrapper for DEIM: DETR with Improved Matching for Fast Convergence. Check out the original repo for more details.

+<div align="center">
+<table>
+  <tr>
+    <td align="center"><a href="#-why-deimkit">🤔 Why DEIMKit?</a></td>
+    <td align="center"><a href="#-key-features">🌟 Key Features</a></td>
+    <td align="center"><a href="#-installation">📦 Installation</a></td>
+    <td align="center"><a href="#-usage">🚀 Usage</a></td>
+  </tr>
+  <tr>
+    <td align="center"><a href="#-inference">💡 Inference</a></td>
+    <td align="center"><a href="#-training">🏋️ Training</a></td>
+    <td align="center"><a href="#-export">💾 Export</a></td>
+    <td align="center"><a href="#-disclaimer">⚠️ Disclaimer</a></td>
+  </tr>
+</table>
+</div>
+
Open In Colab @@ -22,29 +60,35 @@
-
-## Why DEIMKit?
+## 🤔 Why DEIMKit?

- **Pure Python Configuration** - No complicated YAML files, just clean Python code
- **Cross-Platform Simplicity** - Single command installation on Linux, macOS, and Windows
- **Intuitive API** - Load, train, predict, export in just a few lines of code

-## Supported Features
-
-- [x] Inference
-- [x] Training
-- [x] Export
-
-
-## Installation
-
-### Using pip
-Install [torch](https://pytorch.org/get-started/locally/) and torchvision as a pre-requisite.
-
-## Installation
-
-### Using pip
-Install [torch](https://pytorch.org/get-started/locally/) and torchvision as a pre-requisite.
+## 🌟 Key Features
+
+* **💡 Inference**
+  * [x] Single Image & Batch Prediction
+  * [x] Load Pretrained & Custom Models
+  * [x] Built-in Result Visualization
+  * [x] Live ONNX Inference (Webcam, Video, Image)
+* **🏋️ Training**
+  * [x] Single & Multi-GPU Training
+  * [x] Custom Dataset Support (COCO Format)
+  * [x] Flexible Configuration via Pure Python
+* **💾 Export**
+  * [x] Export Trained Models to ONNX
+  * [x] ONNX Model with Integrated Preprocessing
+* **🛠️ Utilities & Demos**
+  * [x] Cross-Platform Support (Linux, macOS, Windows)
+  * [x] Pixi Environment Management Integration
+  * [x] Interactive Gradio Demo Script
+
+## 📦 Installation
+
+### 📥 Using pip
+If you're installing with pip, install [torch](https://pytorch.org/get-started/locally/) and torchvision as prerequisites.

Next, install the package.

Bleeding edge version
@@ -57,7 +101,7 @@ Stable version
pip install git+https://github.com/dnth/DEIM.git@v0.1.1
```

-### Using Pixi
+### 🔌 Using Pixi

> [!TIP]
> I recommend using [Pixi](https://pixi.sh) to run this package. Pixi makes it easy to install the right version of Python and the dependencies to run this package on any platform!
@@ -85,7 +129,7 @@ This will download a toy dataset with 8 images, and train a model on it for 3 ep
If this runs without any issues, you've got a working Python environment with all the dependencies installed. This also installs DEIMKit in editable mode for development. See the [pixi cheatsheet](#-pixi-cheat-sheet) below for more.

-## Usage
+## 🚀 Usage

List models supported by DEIMKit

@@ -103,7 +147,7 @@ list_models()
 'deim_hgnetv2_x']
```

-### Inference
+### 💡 Inference

Load a pretrained model by the original authors

@@ -157,7 +201,7 @@ Stomata Dataset
See the [demo notebook on using pretrained models](nbs/pretrained-model-inference.ipynb) and [custom model inference](nbs/custom-model-inference.ipynb) for more details.

-### Training
+### 🏋️ Training

DEIMKit provides a simple interface for training your own models.
@@ -225,7 +269,8 @@ Navigate to the http://localhost:6006/ in your browser to view the training prog

![alt text](assets/tensorboard.png)

-### Export
+### 💾 Export
+Currently, the export function only exports the model to ONNX so that it can be run with ONNXRuntime (see [Live Inference](#-live-inference) for more details). I think one could get pretty far with this even on a low-resource machine. Drop an issue if you think this should be extended to other formats.

```python
from deimkit.exporter import Exporter
@@ -240,58 +285,128 @@ output_path = exporter.to_onnx(
)
```

-### Gradio App
-Run a Gradio app to interact with your model.
+> [!NOTE]
+> The exported model will accept raw BGR images of any size and handles the preprocessing internally. Credit to [PINTO0309](https://github.com/PINTO0309/DEIM) for the implementation.
+>
+> ![onnx model](assets/exported_onnx.png)
+
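+If you want to double-check what the exported graph expects, you can inspect it with ONNXRuntime. This is a minimal sketch; `best.onnx` is just an example path, and the names shown in the comments are taken from the exporter logs.
+
+```python
+import onnxruntime as ort
+
+# Open the exported model ("best.onnx" is an example path)
+sess = ort.InferenceSession("best.onnx", providers=["CPUExecutionProvider"])
+
+# The exporter logs report inputs ['input_bgr', 'orig_target_sizes'] and
+# outputs ['labels', 'boxes', 'scores'] with dynamic N/H/W axes
+for inp in sess.get_inputs():
+    print(inp.name, inp.shape, inp.type)
+for out in sess.get_outputs():
+    print(out.name, out.shape, out.type)
+```
+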
+> [!TIP]
+> If you want to export to OpenVINO, you can do so directly from the ONNX model.
+>
+> ```python
+> import onnx
+>
+> model = onnx.load("best.onnx")
+>
+> # Change the mode attribute of the GridSample node to bilinear, as the linear mode is not supported in OpenVINO
+> for node in model.graph.node:
+>     if node.op_type == 'GridSample':
+>         for i, attr in enumerate(node.attribute):
+>             if attr.name == 'mode' and attr.s == b'linear':
+>                 # Replace 'linear' with 'bilinear'
+>                 node.attribute[i].s = b'bilinear'
+>
+> # Save the modified model
+> onnx.save(model, "best_prep_openvino.onnx")
+> ```
+> You can then use the live inference script to run inference on the OpenVINO model.
+
+### 🖥️ Gradio App
+Run a Gradio app to interact with your model. The app accepts raw BGR images of any size and handles the preprocessing internally using the exported ONNX model.

```bash
-python scripts/gradio_demo.py
+python scripts/gradio_demo.py \
+    --model "best.onnx" \
+    --classes "classes.txt" \
+    --examples "Rock Paper Scissors SXSW.v14i.coco/test"
```

![alt text](assets/gradio_demo.png)

-### Live Inference
+> [!NOTE]
+> The demo app uses the exported ONNX model and ONNXRuntime for inference. Additionally, I have made the ONNX model accept any input size, even though the original model was trained on 640x640 images.
+> This means you can use any image size you want. Play around with the input size slider to see what works best for your model.
+> Some objects are still detected even at lower input sizes, which means you can use a lower input size to speed up inference.
+
+### 🎥 Live Inference

Run live inference on a video, image or webcam using ONNXRuntime. This runs on CPU by default.

-If you would like to use the CUDA backend, you can install the `onnxruntime-gpu` package and uninstall the `onnxruntime` package.
+If you would like to use the CUDA backend, install the `onnxruntime-gpu` package and uninstall the `onnxruntime` package.

-For video inference, specify the path to the video file as the input. Output video will be saved as `onnx_result.mp4` in the current directory.
+For running inference on a webcam, set the `--webcam` flag.

```bash
python scripts/live_inference.py
-    --onnx model.onnx # Path to the ONNX model file
-    --input video.mp4 # Path to the input video file
-    --class-names classes.txt # Path to the classes file with each name on a new row
-    --input-size 320 # Input size for the model
+    --model model.onnx # Path to the ONNX model file
+    --webcam # Use webcam as input source
+    --classes classes.txt # Path to the classes file with each name on a new row
+    --video-width 720 # Webcam capture width in pixels
+    --provider tensorrt # Execution provider (cpu/cuda/tensorrt)
+    --threshold 0.3 # Detection confidence threshold
```

-The following is a demo of video inference after training for about 50 epochs on the vehicles dataset with image size 320x320.
+Because preprocessing is handled inside the ONNX model, inference is not limited to the original 640x640 training resolution; you can use any input size you want. Baking the preprocessing into the ONNX model also lets us run inference at very high FPS, since it uses more efficient ONNX operators.

-https://github.com/user-attachments/assets/5066768f-c97e-4999-af81-ffd29d88f529
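+If you prefer to call the exported model directly instead of going through `scripts/live_inference.py`, the sketch below shows one way to do it with ONNXRuntime. The input/output names come from the exporter logs; the file paths, the float32 dtype, and the `[width, height]` ordering of `orig_target_sizes` are assumptions, so verify them against your own exported model.
+
+```python
+import cv2
+import numpy as np
+import onnxruntime as ort
+
+session = ort.InferenceSession("model.onnx", providers=["CPUExecutionProvider"])
+
+image = cv2.imread("image.jpg")  # raw BGR image, any size
+h, w = image.shape[:2]
+
+# Batch of one in NCHW layout; resizing and normalization happen inside the model
+blob = image.transpose(2, 0, 1)[None].astype(np.float32)  # assumed dtype
+sizes = np.array([[w, h]], dtype=np.int64)  # assumed [width, height] order
+
+labels, boxes, scores = session.run(
+    ["labels", "boxes", "scores"],
+    {"input_bgr": blob, "orig_target_sizes": sizes},
+)
+
+keep = scores[0] > 0.3  # same default confidence threshold as the scripts
+print(labels[0][keep], scores[0][keep])
+```
+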
+The following is a model I trained on a custom dataset using the deim_hgnetv2_s model and exported to ONNX. Here are some examples of inference on a webcam at different video resolutions.
+
+Webcam resolution of 1920x1080 pixels (1080p):
-You can also run live inference on a webcam by setting the `webcam` flag.
+
+https://github.com/user-attachments/assets/bd98eb1e-feff-4b53-9fa9-d4aff6a724e0
+
+Webcam resolution of 1280x720 pixels (720p):
+
+https://github.com/user-attachments/assets/31a8644e-e0c6-4bba-9d4f-857a3d0b53e1
+
+Webcam resolution of 848x480 pixels (480p):
+
+https://github.com/user-attachments/assets/aa267f05-5dbd-4824-973c-62f3b8f59c80
+
+Webcam resolution of 640x480 pixels (480p):
+
+https://github.com/user-attachments/assets/3d0c04c0-645a-4d54-86c0-991930491113
+
+Webcam resolution of 320x240 pixels (240p):
+
+https://github.com/user-attachments/assets/f4afff9c-3e6d-4965-ab86-0d4de7ce1a44
+
+
+
+For video inference, specify the path to the video file as the input. The output video will be saved as `onnx_result.mp4` in the current directory.

```bash
python scripts/live_inference.py
-    --onnx model.onnx # Path to the ONNX model file
-    --input video.mp4 # Path to the input video file
-    --class-names classes.txt # Path to the classes file with each name on a new row
-    --input-size 320 # Input size for the model
+    --model model.onnx # Path to the ONNX model file
+    --video video.mp4 # Path to the input video file
+    --classes classes.txt # Path to the classes file with each name on a new row
+    --video-width 320 # Video frame width in pixels
+    --provider cpu # Execution provider (cpu/cuda/tensorrt)
+    --threshold 0.3 # Detection confidence threshold
```

-The following is a demo of webcam inference after training on the rock paper scissors dataset 640x640 resolution image.
+https://github.com/user-attachments/assets/6bc1dc6a-a223-4220-954d-2dab5c75b4a8
+
+The following is an inference run with the pretrained `deim_hgnetv2_x` model trained on COCO. See how I exported the pretrained model to ONNX in [this notebook](nbs/export.ipynb).

-https://github.com/user-attachments/assets/6e5dbb15-4e3a-45a3-997e-157bb9370146
+https://github.com/user-attachments/assets/77070ea4-8407-4648-ade3-01cacd77b51b

For image inference, specify the path to the image file as the input.
+
```bash
python scripts/live_inference.py
-    --onnx model.onnx # Path to the ONNX model file
-    --input image.jpg # Path to the input image file
-    --class-names classes.txt # Path to the classes file. Each class name should be on a new line.
-    --input-size 320 # Input size for the model
+    --model model.onnx # Path to the ONNX model file
+    --image image.jpg # Path to the input image file
+    --classes classes.txt # Path to the classes file with each name on a new row
+    --provider cpu # Execution provider (cpu/cuda/tensorrt)
+    --threshold 0.3 # Detection confidence threshold
```
+
+
+
The following is a demo of image inference

-![image](assets/sample_result_image.jpg)
+![image](assets/sample_result_image_1.jpg)
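+To draw results like the image above without the demo scripts, a small OpenCV sketch works. It assumes the `boxes` output is in xyxy pixel coordinates of the original image (which is what passing `orig_target_sizes` suggests); treat that as an assumption and check it against your own model.
+
+```python
+import cv2
+
+# labels, boxes, scores as returned by the ONNX model in the sketch above
+def draw_detections(image, labels, boxes, scores, class_names, threshold=0.3):
+    for label, box, score in zip(labels, boxes, scores):
+        if score < threshold:
+            continue
+        x1, y1, x2, y2 = map(int, box)  # assumed xyxy pixel coordinates
+        cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 2)
+        cv2.putText(image, f"{class_names[int(label)]}: {score:.2f}",
+                    (x1, max(y1 - 5, 0)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
+    return image
+```
+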
> [!TIP]
> If you are using Pixi, you can run the live inference script with the same arguments as above.
@@ -308,7 +423,7 @@
> If you want to use the CPU, replace `cuda` with `cpu` in the command above.

-## Pixi Cheat Sheet
+## 📝 Pixi Cheat Sheet

Here are some useful tasks you can run with Pixi.

Run a quickstart
@@ -352,7 +467,7 @@ pixi run -e cpu live-inference --onnx model.onnx --input video.mp4 --class-names
Launch Gradio app
```

```bash
-pixi run -e cuda gradio-demo
+pixi run gradio-demo --model "best_prep.onnx" --classes "classes.txt" --examples "Rock Paper Scissors SXSW.v14i.coco/test"
```

```bash
@@ -366,5 +481,5 @@ pixi run export --config config.yml --checkpoint model.pth --output model.onnx

-## Disclaimer
+## ⚠️ Disclaimer
I'm not affiliated with the original DEIM authors. I just found the model interesting and wanted to try it out. The changes made here are my own. Please cite and star the original repo if you find this useful.
diff --git a/assets/exported_onnx.png b/assets/exported_onnx.png new file mode 100644 index 00000000..39d75e2a Binary files /dev/null and b/assets/exported_onnx.png differ diff --git a/assets/gradio_demo.png b/assets/gradio_demo.png index de1b4033..6e2cdaa6 100644 Binary files a/assets/gradio_demo.png and b/assets/gradio_demo.png differ diff --git a/assets/sample_result_image_1.jpg b/assets/sample_result_image_1.jpg new file mode 100644 index 00000000..641612a5 Binary files /dev/null and b/assets/sample_result_image_1.jpg differ diff --git a/nbs/export.ipynb b/nbs/export.ipynb new file mode 100644 index 00000000..e1babd4f --- /dev/null +++ b/nbs/export.ipynb @@ -0,0 +1,175 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m2025-04-02 16:30:26.794\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.predictor\u001b[0m:\u001b[36m__init__\u001b[0m:\u001b[36m119\u001b[0m - \u001b[1mInitializing Predictor with device=auto\u001b[0m\n", + "\u001b[32m2025-04-02 16:30:26.835\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.predictor\u001b[0m:\u001b[36m_setup_device\u001b[0m:\u001b[36m168\u001b[0m - \u001b[1mAuto-selected device: cuda\u001b[0m\n", + "\u001b[32m2025-04-02 16:30:26.878\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.predictor\u001b[0m:\u001b[36m_download_checkpoint\u001b[0m:\u001b[36m197\u001b[0m - \u001b[1mDownloading checkpoint for model deim_hgnetv2_x...\u001b[0m\n", + "Downloading...\n", + "From (original): https://drive.google.com/uc?id=1dPtbgtGgq1Oa7k_LgH1GXPelg1IVeu0j\n", + "From (redirected): https://drive.google.com/uc?id=1dPtbgtGgq1Oa7k_LgH1GXPelg1IVeu0j&confirm=t&uuid=b8b8be39-c2e9-4de7-bd7e-4bd5d14ba721\n", + "To: /home/dnth/Desktop/DEIMKit/nbs/checkpoints/deim_hgnetv2_x.pth\n", + "100%|██████████| 252M/252M [00:56<00:00, 4.50MB/s] \n", + "\u001b[32m2025-04-02 16:31:27.994\u001b[0m | \u001b[32m\u001b[1mSUCCESS \u001b[0m | \u001b[36mdeimkit.predictor\u001b[0m:\u001b[36m_download_checkpoint\u001b[0m:\u001b[36m200\u001b[0m - \u001b[32m\u001b[1mDownloaded checkpoint to checkpoints/deim_hgnetv2_x.pth\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:28.280\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.predictor\u001b[0m:\u001b[36m_setup_model_config\u001b[0m:\u001b[36m232\u001b[0m - \u001b[1mLoading configuration from model name: deim_hgnetv2_x\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:28.295\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.predictor\u001b[0m:\u001b[36m_setup_model_config\u001b[0m:\u001b[36m240\u001b[0m - \u001b[1mUpdating model configuration for 80 classes\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:28.663\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.predictor\u001b[0m:\u001b[36m_load_model_weights\u001b[0m:\u001b[36m253\u001b[0m - \u001b[1mSuccessfully loaded
checkpoint weights\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:28.976\u001b[0m | \u001b[32m\u001b[1mSUCCESS \u001b[0m | \u001b[36mdeimkit.predictor\u001b[0m:\u001b[36m__init__\u001b[0m:\u001b[36m155\u001b[0m - \u001b[32m\u001b[1mPredictor initialization complete\u001b[0m\n" + ] + } + ], + "source": [ + "from deimkit import list_models, load_model\n", + "\n", + "coco_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',\n", + " 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',\n", + " 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',\n", + " 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n", + " 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',\n", + " 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',\n", + " 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n", + "]\n", + "\n", + "model = load_model(\"deim_hgnetv2_x\", class_names=coco_classes)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "task: detection\n", + "num_workers: 0\n", + "batch_size: None\n", + "resume: None\n", + "tuning: None\n", + "epoches: 58\n", + "last_epoch: -1\n", + "lrsheduler: flatcosine\n", + "lr_gamma: 0.5\n", + "no_aug_epoch: 8\n", + "warmup_iter: 2000\n", + "flat_epoch: 29\n", + "use_amp: True\n", + "use_ema: True\n", + "ema_decay: 0.9999\n", + "ema_warmups: 2000\n", + "sync_bn: True\n", + "clip_max_norm: 0.1\n", + "find_unused_parameters: False\n", + "seed: None\n", + "print_freq: 100\n", + "checkpoint_freq: 4\n", + "output_dir: ./outputs/deim_hgnetv2_x_coco\n", + "summary_dir: None\n", + "device: \n", + "yaml_cfg: {'task': 'detection', 'evaluator': {'type': 'CocoEvaluator', 'iou_types': ['bbox']}, 'num_classes': 80, 'remap_mscoco_category': True, 'train_dataloader': {'type': 'DataLoader', 'dataset': {'type': 'CocoDetection', 'img_folder': '/datassd/COCO/train2017/', 'ann_file': '/datassd/COCO/annotations/instances_train2017.json', 'return_masks': False, 'transforms': {'type': 'Compose', 'ops': [{'type': 'Mosaic', 'output_size': 640, 'rotation_range': 10, 'translation_range': [0.1, 0.1], 'scaling_range': [0.5, 1.5], 'probability': 1.0, 'fill_value': 0, 'use_cache': False, 'max_cached_images': 50, 'random_pop': True}, {'type': 'RandomPhotometricDistort', 'p': 0.5}, {'type': 'RandomZoomOut', 'fill': 0}, {'type': 'RandomIoUCrop', 'p': 0.8}, {'type': 'SanitizeBoundingBoxes', 'min_size': 1}, {'type': 'RandomHorizontalFlip'}, {'type': 'Resize', 'size': (640, 640)}, {'type': 'SanitizeBoundingBoxes', 'min_size': 1}, {'type': 'ConvertPILImage', 'dtype': 'float32', 'scale': True}, {'type': 'ConvertBoxes', 'fmt': 'cxcywh', 'normalize': True}], 'policy': {'name': 'stop_epoch', 'epoch': [4, 29, 50], 'ops': ['Mosaic', 'RandomPhotometricDistort', 'RandomZoomOut', 'RandomIoUCrop']}, 'mosaic_prob': 0.5}}, 'shuffle': True, 'num_workers': 4, 'drop_last': True, 'collate_fn': {'type': 'BatchImageCollateFunction', 'base_size': 640, 'base_size_repeat': 3, 'stop_epoch': 50, 'ema_restart_decay': 0.9998, 'mixup_prob': 0.5, 'mixup_epochs': [4, 
29]}, 'total_batch_size': 32}, 'val_dataloader': {'type': 'DataLoader', 'dataset': {'type': 'CocoDetection', 'img_folder': '/datassd/COCO/val2017/', 'ann_file': '/datassd/COCO/annotations/instances_val2017.json', 'return_masks': False, 'transforms': {'type': 'Compose', 'ops': [{'type': 'Resize', 'size': (640, 640)}, {'type': 'ConvertPILImage', 'dtype': 'float32', 'scale': True}]}}, 'shuffle': False, 'num_workers': 4, 'drop_last': False, 'collate_fn': {'type': 'BatchImageCollateFunction'}, 'total_batch_size': 64}, 'print_freq': 100, 'output_dir': './outputs/deim_hgnetv2_x_coco', 'checkpoint_freq': 4, 'sync_bn': True, 'find_unused_parameters': False, 'use_amp': True, 'scaler': {'type': 'GradScaler', 'enabled': True}, 'use_ema': True, 'ema': {'type': 'ModelEMA', 'decay': 0.9999, 'warmups': 1000, 'start': 0}, 'epoches': 58, 'clip_max_norm': 0.1, 'optimizer': {'type': 'AdamW', 'params': [{'params': '^(?=.*backbone)(?!.*norm|bn).*$', 'lr': 5e-06}, {'params': '^(?=.*(?:encoder|decoder))(?=.*(?:norm|bn)).*$', 'weight_decay': 0.0}], 'lr': 0.0005, 'betas': [0.9, 0.999], 'weight_decay': 0.000125}, 'lr_scheduler': {'type': 'MultiStepLR', 'milestones': [500], 'gamma': 0.1}, 'lr_warmup_scheduler': {'type': 'LinearWarmup', 'warmup_duration': 500}, 'model': 'DEIM', 'criterion': 'DEIMCriterion', 'postprocessor': 'PostProcessor', 'use_focal_loss': True, 'eval_spatial_size': (640, 640), 'DEIM': {'backbone': 'HGNetv2', 'encoder': 'HybridEncoder', 'decoder': 'DFINETransformer'}, 'lrsheduler': 'flatcosine', 'lr_gamma': 0.5, 'warmup_iter': 2000, 'flat_epoch': 29, 'no_aug_epoch': 8, 'HGNetv2': {'pretrained': False, 'local_model_dir': '../RT-DETR-main/D-FINE/weight/hgnetv2/', 'name': 'B5', 'return_idx': [1, 2, 3], 'freeze_stem_only': True, 'freeze_at': -1, 'freeze_norm': False}, 'HybridEncoder': {'in_channels': [512, 1024, 2048], 'feat_strides': [8, 16, 32], 'hidden_dim': 384, 'use_encoder_idx': [2], 'num_encoder_layers': 1, 'nhead': 8, 'dim_feedforward': 2048, 'dropout': 0.0, 'enc_act': 'gelu', 'expansion': 1.0, 'depth_mult': 1, 'act': 'silu'}, 'DFINETransformer': {'feat_channels': [384, 384, 384], 'feat_strides': [8, 16, 32], 'hidden_dim': 256, 'num_levels': 3, 'num_layers': 6, 'eval_idx': -1, 'num_queries': 300, 'num_denoising': 100, 'label_noise_ratio': 0.5, 'box_noise_scale': 1.0, 'reg_max': 32, 'reg_scale': 8, 'layer_scale': 1, 'num_points': [3, 6, 3], 'cross_attn_method': 'default', 'query_select_method': 'default', 'activation': 'silu', 'mlp_act': 'silu'}, 'PostProcessor': {'num_top_queries': 300}, 'DEIMCriterion': {'weight_dict': {'loss_vfl': 1, 'loss_bbox': 5, 'loss_giou': 2, 'loss_fgl': 0.15, 'loss_ddf': 1.5, 'loss_mal': 1}, 'losses': ['mal', 'boxes', 'local'], 'alpha': 0.75, 'gamma': 1.5, 'reg_max': 32, 'matcher': {'type': 'HungarianMatcher', 'weight_dict': {'cost_class': 2, 'cost_bbox': 5, 'cost_giou': 2}, 'alpha': 0.25, 'gamma': 2.0}}, '__include__': ['./dfine_hgnetv2_x_coco.yml', '../base/deim.yml']}" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "model.cfg.save(\"./checkpoints/deim_hgnetv2_x.yml\")" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "\u001b[32m2025-04-02 16:31:29.014\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m123\u001b[0m - \u001b[1mUsing device: cpu\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.014\u001b[0m | \u001b[1mINFO \u001b[0m | 
\u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m126\u001b[0m - \u001b[1mLoading checkpoint from ./checkpoints/deim_hgnetv2_x.pth\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.174\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m134\u001b[0m - \u001b[1mEMA weights not found, using regular model weights\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.620\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m162\u001b[0m - \u001b[1mInput shape not provided, getting size from config\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.620\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m188\u001b[0m - \u001b[1mUsing target shape from config: (1, 3, 640, 640)\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.621\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36m__init__\u001b[0m:\u001b[36m32\u001b[0m - \u001b[1mInitialized PreprocessingModule with target size: (640, 640)\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.621\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m201\u001b[0m - \u001b[1mIncluding preprocessing steps in the ONNX model.\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.640\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m260\u001b[0m - \u001b[1mUsing input names: ['input_bgr', 'orig_target_sizes']\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.641\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m261\u001b[0m - \u001b[1mUsing output names: ['labels', 'boxes', 'scores']\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.641\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m262\u001b[0m - \u001b[1mUsing dynamic axes: {'input_bgr': {0: 'N', 2: 'H', 3: 'W'}, 'orig_target_sizes': {0: 'N'}, 'labels': {0: 'N'}, 'boxes': {0: 'N'}, 'scores': {0: 'N'}}\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.641\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m263\u001b[0m - \u001b[1mExporting model to ONNX: ./checkpoints/deim_hgnetv2_x.onnx\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.662\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mforward\u001b[0m:\u001b[36m54\u001b[0m - \u001b[34m\u001b[1mPreprocessing: Resized shape: torch.Size([1, 3, 640, 640])\u001b[0m\n", + "/home/dnth/Desktop/DEIMKit/src/deimkit/exporter.py:58: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. 
This means that the trace might not generalize to other inputs!\n", + " if x.shape[1] != 3:\n", + "\u001b[32m2025-04-02 16:31:29.666\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mforward\u001b[0m:\u001b[36m62\u001b[0m - \u001b[34m\u001b[1mPreprocessing: Swapped BGR to RGB\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:29.667\u001b[0m | \u001b[34m\u001b[1mDEBUG \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mforward\u001b[0m:\u001b[36m67\u001b[0m - \u001b[34m\u001b[1mPreprocessing: Normalized pixel values to [0, 1]\u001b[0m\n", + "/home/dnth/Desktop/DEIMKit/src/deimkit/engine/deim/dfine_decoder.py:646: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n", + " if memory.shape[0] > 1:\n", + "/home/dnth/Desktop/DEIMKit/src/deimkit/engine/deim/dfine_decoder.py:129: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n", + " if reference_points.shape[-1] == 2:\n", + "/home/dnth/Desktop/DEIMKit/src/deimkit/engine/deim/dfine_decoder.py:133: TracerWarning: Converting a tensor to a Python boolean might cause the trace to be incorrect. We can't record the data flow of Python values, so this value will be treated as a constant in the future. This means that the trace might not generalize to other inputs!\n", + " elif reference_points.shape[-1] == 4:\n", + "/home/dnth/Desktop/DEIMKit/.pixi/envs/cuda/lib/python3.11/site-packages/torch/onnx/_internal/jit_utils.py:308: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at /pytorch/torch/csrc/jit/passes/onnx/constant_fold.cpp:178.)\n", + " _C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version)\n", + "/home/dnth/Desktop/DEIMKit/.pixi/envs/cuda/lib/python3.11/site-packages/torch/onnx/utils.py:657: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. (Triggered internally at /pytorch/torch/csrc/jit/passes/onnx/constant_fold.cpp:178.)\n", + " _C._jit_pass_onnx_graph_shape_type_inference(\n", + "/home/dnth/Desktop/DEIMKit/.pixi/envs/cuda/lib/python3.11/site-packages/torch/onnx/utils.py:1127: UserWarning: Constant folding - Only steps=1 can be constant folded for opset >= 10 onnx::Slice op. Constant folding not applied. 
(Triggered internally at /pytorch/torch/csrc/jit/passes/onnx/constant_fold.cpp:178.)\n", + " _C._jit_pass_onnx_graph_shape_type_inference(\n", + "\u001b[32m2025-04-02 16:31:34.042\u001b[0m | \u001b[32m\u001b[1mSUCCESS \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m285\u001b[0m - \u001b[32m\u001b[1mONNX export completed successfully: ./checkpoints/deim_hgnetv2_x.onnx\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:34.043\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36mto_onnx\u001b[0m:\u001b[36m296\u001b[0m - \u001b[1mSimplifying ONNX model with input shapes: {'input_bgr': torch.Size([1, 3, 640, 640]), 'orig_target_sizes': torch.Size([1, 2])}\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:34.077\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36m_simplify_onnx_model\u001b[0m:\u001b[36m401\u001b[0m - \u001b[1mSimplifying ONNX model: ./checkpoints/deim_hgnetv2_x.onnx -> ./checkpoints/deim_hgnetv2_x.onnx\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:49.184\u001b[0m | \u001b[32m\u001b[1mSUCCESS \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36m_simplify_onnx_model\u001b[0m:\u001b[36m411\u001b[0m - \u001b[32m\u001b[1mONNX model simplification successful: ./checkpoints/deim_hgnetv2_x.onnx\u001b[0m\n", + "\u001b[32m2025-04-02 16:31:49.575\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mdeimkit.exporter\u001b[0m:\u001b[36m_check_onnx_model\u001b[0m:\u001b[36m368\u001b[0m - \u001b[1mONNX model validation successful: ./checkpoints/deim_hgnetv2_x.onnx\u001b[0m\n" + ] + } + ], + "source": [ + "from deimkit.exporter import Exporter\n", + "from deimkit.config import Config\n", + "\n", + "config = Config(\"./checkpoints/deim_hgnetv2_x.yml\")\n", + "exporter = Exporter(config)\n", + "\n", + "output_path = exporter.to_onnx(\n", + " checkpoint_path=\"./checkpoints/deim_hgnetv2_x.pth\",\n", + " output_path=\"./checkpoints/deim_hgnetv2_x.onnx\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "cuda", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/pixi.lock b/pixi.lock index c24990ba..204e62bf 100644 --- a/pixi.lock +++ b/pixi.lock @@ -34,8 +34,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/linux-64/libcurl-8.12.1-h332b0f4_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libedit-3.1.20250104-pl5321h7949ede_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libev-4.33-hd590300_2.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.6.4-h5888daf_0.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.6-h2dba641_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libexpat-2.7.0-h5888daf_0.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/libffi-3.4.6-h2dba641_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-14.2.0-h767d61c_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libgcc-ng-14.2.0-h69a702a_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/libgomp-14.2.0-h767d61c_2.conda @@ -67,7 +67,7 @@ 
environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/python-3.11.11-h9e4cc4f_2_cpython.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhff2d567_1.conda - - conda: https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.11-5_cp311.conda + - conda: https://conda.anaconda.org/conda-forge/linux-64/python_abi-3.11-6_cp311.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/pyzmq-26.3.0-py311h7deb3e3_0.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/readline-8.2-h8c095d6_2.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/rhash-1.4.5-hb9d3cd8_0.conda @@ -84,7 +84,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.21.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/linux-64/zstd-1.5.7-hb8e6e7a_2.conda - pypi: https://files.pythonhosted.org/packages/98/5e/34ccb5bfb8dae555045c2dd13375e01ac8e2c1f200a4e4051e95fb9addf0/absl_py-2.2.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/70/83/167d4b638bb758a966828eb8d23c5e7047825edfdf768ff5f4fb01440063/accelerate-1.5.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/63/b1/8198e3cdd11a426b1df2912e3381018c4a4a55368f6d0857ba3ca418ef93/accelerate-1.6.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c5/19/5af6804c4cc0fed83f47bff6e413a98a36618e7d40185cd36e69737f3b0e/aiofiles-23.2.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl @@ -102,16 +102,16 @@ environments: - pypi: https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/25/155f9f080d5e4bc0082edfda032ea2bc2b8fab3f4d25d46c1e9dd22a1a89/flatbuffers-25.2.10-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/28/e9/47c02d5a7027e8ed841ab6a10ca00c93dadd5f16742f1af1fa3f9978adf4/fonttools-4.56.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/56/53/eb690efa8513166adef3e0669afd31e95ffde69fb3c52ec2ac7223ed6018/fsspec-2025.3.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/44/4b/e0cfc1a6f17e990f3e64b7d941ddc4acdc7b19d6edd51abf495f32b1a9e4/fsspec-2025.3.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/70/e07c381e6488a77094f04c85c9caf1c8008cdc30778f7019bc52e5285ef0/gdown-5.2.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/dd/9a/23df7fafbe5201d811a60acb3869a3e21cf2d813eed47b2a3ad7d649b905/gradio-5.23.1-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/5f/e3/f1e6569558d212f747820b2c976b4f57c034413a5747acb373f2a04bcd9a/gradio-5.23.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/15/c8/0df7f92c8f1bdf5c244c29de8cd7e33a5931768ddba245526a770bfa18a2/gradio_client-1.8.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/28/27/3d6dcadc8a3214d8522c1e7f6a19554e33659be44546d44a2f7572ac7d2a/groovy-0.1.2-py3-none-any.whl - pypi: 
https://files.pythonhosted.org/packages/c7/67/cbd63c485051eb78663355d9efd1b896cfb50d4a220581ec2cb9a15cd750/grpcio-1.71.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/40/0c/37d380846a2e5c9a3c6a73d26ffbcfdcad5fc3eacf42fdf7cff56f2af634/huggingface_hub-0.29.3-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/99/e3/2232d0e726d4d6ea69643b9593d97d0e7e6ea69c2fe9ed5de34d476c1c47/huggingface_hub-0.30.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/f0/0f/310fb31e39e2d734ccaa2c0fb981ee41f7bd5056ce9bc29b2248bd569169/humanfriendly-10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl @@ -123,7 +123,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/40/b8/53fa08a5eaf78d3a7213fd6da1feec4bae14a81d9805e567013811ff0e85/matplotlib-3.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/e9/96/79a6168dc7a5098066e097c01a45d01608c8df6552dfb92a2676ce623186/narwhals-1.32.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/41/c1/e9bc6b67c774e7c1f939c91ea535f18f7644fedc61b20d6baa861ad52b34/narwhals-1.33.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c5/5c/ceefca458559f0ccc7a982319f37ed07b0d7b526964ae6cc61f8ad1b6119/numpy-2.2.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/b1/2f/91092557ed478e323a2b4471e2081fdf88d1dd52ae988ceaf7db4e4506ff/onnx-1.17.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl @@ -132,7 +132,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/2c/8b/90eb44a40476fa0e71e05a0283947cfd74a5d36121a11d926ad6f3193cc4/opencv_python-4.11.0.86-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/92/18/5b1e1e995bffad49dc4311a0bdfd874bc6f135fd20f0e1f671adc2c9910e/orjson-3.10.16-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/cd/5f/4dba1d39bb9c38d574a9a22548c540177f78ea47b32f99c0ff2ec499fac5/pandas-2.2.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - - pypi: https://files.pythonhosted.org/packages/48/a4/fbfe9d5581d7b111b28f1d8c2762dee92e9821bb209af9fa83c940e507a0/pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl + - pypi: 
https://files.pythonhosted.org/packages/90/c5/d4eaed054f75a3ffbd127190200b26c949acda13794aef6cf0ab91f2fb00/pillow-11.2.0-cp311-cp311-manylinux_2_28_x86_64.whl - pypi: https://files.pythonhosted.org/packages/02/65/ad2bc85f7377f5cfba5d4466d5474423a3fb7f6a97fd807c06f92dd3e721/plotly-6.0.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/28/50/1925de813499546bc8ab3ae857e3ec84efe7d2f19b34529d0c7c3d02d11d/protobuf-6.30.2-cp39-abi3-manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/cc/12/f9221a949f2419e2e23847303c002476c26fbcfd62dc7f3d25d0bec5ca99/pydantic-2.11.1-py3-none-any.whl @@ -145,7 +145,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/75/e4/2c27590dfc9992f73aabbeb9241ae20220bd9452df27483b6e56d3975cc5/PyYAML-6.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/bf/ce/0d0e61429f603bac433910d99ef1a02ce45a8967ffbe3cbee48599e62d88/regex-2024.11.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/ce/d2/4ceed7147e05852876f3b5f3fdc23f878ce2b7e0b90dd6e698bda3d20787/ruff-0.11.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl - pypi: https://files.pythonhosted.org/packages/4d/c0/1108ad9f01567f66b3154063605b350b69c3c9366732e09e45f9fd0d1deb/safehttpx-0.1.6-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a6/f8/dae3421624fcc87a89d42e1898a798bc7ff72c61f38973a65d60df8f124c/safetensors-0.5.3-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl @@ -199,8 +199,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libcxx-20.1.1-ha82da77_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libedit-3.1.20250104-pl5321hafb1f1b_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libev-4.33-h93a5062_2.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.6.4-h286801f_0.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.2-h3422bc3_5.tar.bz2 + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libexpat-2.7.0-h286801f_0.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libffi-3.4.6-h1da3d7d_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/liblzma-5.6.4-h39f12f2_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libnghttp2-1.64.0-h6d7220d_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/libsodium-1.0.20-h99b78c6_0.conda @@ -224,7 +224,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python-3.11.11-hc22306f_2_cpython.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhff2d567_1.conda - - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python_abi-3.11-5_cp311.conda + - conda: https://conda.anaconda.org/conda-forge/osx-arm64/python_abi-3.11-6_cp311.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/pyzmq-26.3.0-py311h01f2145_0.conda - conda: 
https://conda.anaconda.org/conda-forge/osx-arm64/readline-8.2-h1d1bf99_2.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/rhash-1.4.5-h7ab814d_0.conda @@ -241,7 +241,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.21.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/osx-arm64/zstd-1.5.7-h6491c7d_2.conda - pypi: https://files.pythonhosted.org/packages/98/5e/34ccb5bfb8dae555045c2dd13375e01ac8e2c1f200a4e4051e95fb9addf0/absl_py-2.2.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/70/83/167d4b638bb758a966828eb8d23c5e7047825edfdf768ff5f4fb01440063/accelerate-1.5.2-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/63/b1/8198e3cdd11a426b1df2912e3381018c4a4a55368f6d0857ba3ca418ef93/accelerate-1.6.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c5/19/5af6804c4cc0fed83f47bff6e413a98a36618e7d40185cd36e69737f3b0e/aiofiles-23.2.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl @@ -259,16 +259,16 @@ environments: - pypi: https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/25/155f9f080d5e4bc0082edfda032ea2bc2b8fab3f4d25d46c1e9dd22a1a89/flatbuffers-25.2.10-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/35/56/a2f3e777d48fcae7ecd29de4d96352d84e5ea9871e5f3fc88241521572cf/fonttools-4.56.0-cp311-cp311-macosx_10_9_universal2.whl - - pypi: https://files.pythonhosted.org/packages/56/53/eb690efa8513166adef3e0669afd31e95ffde69fb3c52ec2ac7223ed6018/fsspec-2025.3.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/44/4b/e0cfc1a6f17e990f3e64b7d941ddc4acdc7b19d6edd51abf495f32b1a9e4/fsspec-2025.3.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/70/e07c381e6488a77094f04c85c9caf1c8008cdc30778f7019bc52e5285ef0/gdown-5.2.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/dd/9a/23df7fafbe5201d811a60acb3869a3e21cf2d813eed47b2a3ad7d649b905/gradio-5.23.1-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/5f/e3/f1e6569558d212f747820b2c976b4f57c034413a5747acb373f2a04bcd9a/gradio-5.23.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/15/c8/0df7f92c8f1bdf5c244c29de8cd7e33a5931768ddba245526a770bfa18a2/gradio_client-1.8.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/28/27/3d6dcadc8a3214d8522c1e7f6a19554e33659be44546d44a2f7572ac7d2a/groovy-0.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b4/d5/0bc53ed33ba458de95020970e2c22aa8027b26cc84f98bea7fcad5d695d1/grpcio-1.71.0-cp311-cp311-macosx_10_14_universal2.whl - pypi: https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl - - pypi: 
https://files.pythonhosted.org/packages/40/0c/37d380846a2e5c9a3c6a73d26ffbcfdcad5fc3eacf42fdf7cff56f2af634/huggingface_hub-0.29.3-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/99/e3/2232d0e726d4d6ea69643b9593d97d0e7e6ea69c2fe9ed5de34d476c1c47/huggingface_hub-0.30.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/f0/0f/310fb31e39e2d734ccaa2c0fb981ee41f7bd5056ce9bc29b2248bd569169/humanfriendly-10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl @@ -280,7 +280,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/0a/e4/300b08e3e08f9c98b0d5635f42edabf2f7a1d634e64cb0318a71a44ff720/matplotlib-3.10.1-cp311-cp311-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/43/e3/7d92a15f894aa0c9c4b49b8ee9ac9850d6e63b03c9c32c0367a13ae62209/mpmath-1.3.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/e9/96/79a6168dc7a5098066e097c01a45d01608c8df6552dfb92a2676ce623186/narwhals-1.32.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/41/c1/e9bc6b67c774e7c1f939c91ea535f18f7644fedc61b20d6baa861ad52b34/narwhals-1.33.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b9/54/dd730b32ea14ea797530a4479b2ed46a6fb250f682a9cfb997e968bf0261/networkx-3.4.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a2/0a/1212befdbecab5d80eca3cde47d304cad986ad4eec7d85a42e0b6d2cc2ef/numpy-2.2.4-cp311-cp311-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/e5/a9/8d1b1d53aec70df53e0f57e9f9fcf47004276539e29230c3d5f1f50719ba/onnx-1.17.0-cp311-cp311-macosx_12_0_universal2.whl @@ -289,7 +289,7 @@ environments: - pypi: https://files.pythonhosted.org/packages/05/4d/53b30a2a3ac1f75f65a59eb29cf2ee7207ce64867db47036ad61743d5a23/opencv_python-4.11.0.86-cp37-abi3-macosx_13_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/97/29/43f91a5512b5d2535594438eb41c5357865fd5e64dec745d90a588820c75/orjson-3.10.16-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl - pypi: https://files.pythonhosted.org/packages/52/11/9eac327a38834f162b8250aab32a6781339c69afe7574368fffe46387edf/pandas-2.2.3-cp311-cp311-macosx_11_0_arm64.whl - - pypi: https://files.pythonhosted.org/packages/d9/45/3fe487010dd9ce0a06adf9b8ff4f273cc0a44536e234b0fad3532a42c15b/pillow-11.1.0-cp311-cp311-macosx_11_0_arm64.whl + - pypi: https://files.pythonhosted.org/packages/ea/75/947f47ed4667192ded5ae85336518068170c1450abf46da8a80bec84eb21/pillow-11.2.0-cp311-cp311-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/02/65/ad2bc85f7377f5cfba5d4466d5474423a3fb7f6a97fd807c06f92dd3e721/plotly-6.0.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/8e/66/7f3b121f59097c93267e7f497f10e52ced7161b38295137a12a266b6c149/protobuf-6.30.2-cp39-abi3-macosx_10_9_universal2.whl - pypi: https://files.pythonhosted.org/packages/cc/12/f9221a949f2419e2e23847303c002476c26fbcfd62dc7f3d25d0bec5ca99/pydantic-2.11.1-py3-none-any.whl @@ -302,7 +302,7 @@ environments: - pypi: 
https://files.pythonhosted.org/packages/8b/62/b9faa998fd185f65c1371643678e4d58254add437edb764a08c5a98fb986/PyYAML-6.0.2-cp311-cp311-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/c5/1b/f0e4d13e6adf866ce9b069e191f303a30ab1277e037037a365c3aad5cc9c/regex-2024.11.6-cp311-cp311-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/f9/9b/335f9764261e915ed497fcdeb11df5dfd6f7bf257d4a6a2a686d80da4d54/requests-2.32.3-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/fc/3e/d3f13619e1d152c7b600a38c1a035e833e794c6625c9a6cea6f63dbf3af4/ruff-0.11.2-py3-none-macosx_11_0_arm64.whl - pypi: https://files.pythonhosted.org/packages/4d/c0/1108ad9f01567f66b3154063605b350b69c3c9366732e09e45f9fd0d1deb/safehttpx-0.1.6-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/3b/11f1b4a2f5d2ab7da34ecc062b0bc301f2be024d110a6466726bec8c055c/safetensors-0.5.3-cp38-abi3-macosx_11_0_arm64.whl @@ -353,8 +353,8 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/jupyterlab_widgets-3.0.13-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/krb5-1.21.3-hdf4eb48_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libcurl-8.12.1-h88aaa65_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libexpat-2.6.4-he0c23c2_0.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.4.6-h537db12_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libexpat-2.7.0-he0c23c2_0.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/libffi-3.4.6-h537db12_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/liblzma-5.6.4-h2466b09_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsodium-1.0.20-hc70643c_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/libsqlite-3.49.1-h67fdade_2.conda @@ -374,7 +374,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/pygments-2.19.1-pyhd8ed1ab_0.conda - conda: https://conda.anaconda.org/conda-forge/win-64/python-3.11.11-h3f84c4b_2_cpython.conda - conda: https://conda.anaconda.org/conda-forge/noarch/python-dateutil-2.9.0.post0-pyhff2d567_1.conda - - conda: https://conda.anaconda.org/conda-forge/win-64/python_abi-3.11-5_cp311.conda + - conda: https://conda.anaconda.org/conda-forge/win-64/python_abi-3.11-6_cp311.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pywin32-307-py311hda3d55a_3.conda - conda: https://conda.anaconda.org/conda-forge/win-64/pyzmq-26.3.0-py311h484c95c_0.conda - conda: https://conda.anaconda.org/conda-forge/noarch/six-1.17.0-pyhd8ed1ab_0.conda @@ -393,7 +393,7 @@ environments: - conda: https://conda.anaconda.org/conda-forge/noarch/zipp-3.21.0-pyhd8ed1ab_1.conda - conda: https://conda.anaconda.org/conda-forge/win-64/zstd-1.5.7-hbeecb71_2.conda - pypi: https://files.pythonhosted.org/packages/98/5e/34ccb5bfb8dae555045c2dd13375e01ac8e2c1f200a4e4051e95fb9addf0/absl_py-2.2.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/70/83/167d4b638bb758a966828eb8d23c5e7047825edfdf768ff5f4fb01440063/accelerate-1.5.2-py3-none-any.whl + - pypi: 
https://files.pythonhosted.org/packages/63/b1/8198e3cdd11a426b1df2912e3381018c4a4a55368f6d0857ba3ca418ef93/accelerate-1.6.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/c5/19/5af6804c4cc0fed83f47bff6e413a98a36618e7d40185cd36e69737f3b0e/aiofiles-23.2.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/a1/ee/48ca1a7c89ffec8b6a0c5d02b89c305671d5ffd8d3c94acf8b8c408575bb/anyio-4.9.0-py3-none-any.whl @@ -411,16 +411,16 @@ environments: - pypi: https://files.pythonhosted.org/packages/4d/36/2a115987e2d8c300a974597416d9de88f2444426de9571f4b59b2cca3acc/filelock-3.18.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/b8/25/155f9f080d5e4bc0082edfda032ea2bc2b8fab3f4d25d46c1e9dd22a1a89/flatbuffers-25.2.10-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/3b/90/4926e653041c4116ecd43e50e3c79f5daae6dcafc58ceb64bc4f71dd4924/fonttools-4.56.0-cp311-cp311-win_amd64.whl - - pypi: https://files.pythonhosted.org/packages/56/53/eb690efa8513166adef3e0669afd31e95ffde69fb3c52ec2ac7223ed6018/fsspec-2025.3.0-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/44/4b/e0cfc1a6f17e990f3e64b7d941ddc4acdc7b19d6edd51abf495f32b1a9e4/fsspec-2025.3.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/54/70/e07c381e6488a77094f04c85c9caf1c8008cdc30778f7019bc52e5285ef0/gdown-5.2.0-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/dd/9a/23df7fafbe5201d811a60acb3869a3e21cf2d813eed47b2a3ad7d649b905/gradio-5.23.1-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/5f/e3/f1e6569558d212f747820b2c976b4f57c034413a5747acb373f2a04bcd9a/gradio-5.23.3-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/15/c8/0df7f92c8f1bdf5c244c29de8cd7e33a5931768ddba245526a770bfa18a2/gradio_client-1.8.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/28/27/3d6dcadc8a3214d8522c1e7f6a19554e33659be44546d44a2f7572ac7d2a/groovy-0.1.2-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/ee/3f/cf92e7e62ccb8dbdf977499547dfc27133124d6467d3a7d23775bcecb0f9/grpcio-1.71.0-cp311-cp311-win_amd64.whl - pypi: https://files.pythonhosted.org/packages/95/04/ff642e65ad6b90db43e668d70ffb6736436c7ce41fcc549f4e9472234127/h11-0.14.0-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/87/f5/72347bc88306acb359581ac4d52f23c0ef445b57157adedb9aee0cd689d2/httpcore-1.0.7-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl - - pypi: https://files.pythonhosted.org/packages/40/0c/37d380846a2e5c9a3c6a73d26ffbcfdcad5fc3eacf42fdf7cff56f2af634/huggingface_hub-0.29.3-py3-none-any.whl + - pypi: https://files.pythonhosted.org/packages/99/e3/2232d0e726d4d6ea69643b9593d97d0e7e6ea69c2fe9ed5de34d476c1c47/huggingface_hub-0.30.1-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/f0/0f/310fb31e39e2d734ccaa2c0fb981ee41f7bd5056ce9bc29b2248bd569169/humanfriendly-10.0-py2.py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl - pypi: https://files.pythonhosted.org/packages/62/a1/3d680cbfd5f4b8f15abc1d571870c5fc3e594bb582bc3b64ea099db13e56/jinja2-3.1.6-py3-none-any.whl @@ -432,7 +432,7 @@ environments: - pypi: 
[pixi.lock: the remainder of this machine-generated lockfile diff is omitted here for readability. It repeats the same dependency bumps across the linux-64, osx-arm64, and win-64 environments and their package metadata entries (updated wheel/conda URLs, sha256 hashes, sizes, and timestamps): accelerate 1.5.2 -> 1.6.0, fsspec 2025.3.0 -> 2025.3.2 (requires-python raised to >=3.9), gradio 5.23.1 -> 5.23.3 (adds a pydantic <2.12 pin), huggingface-hub 0.29.3 -> 0.30.1 (adds an hf-xet extra), narwhals 1.32.0 -> 1.33.0, pillow 11.1.0 -> 11.2.0, rich 13.9.4 -> 14.0.0, libexpat 2.6.4 -> 2.7.0, libffi rebuilds (3.4.2 -> 3.4.6 on osx-arm64), and python_abi 3.11 build 5 -> 6.]
177240 timestamp: 1728886815751 -- pypi: https://files.pythonhosted.org/packages/19/71/39c7c0d87f8d4e6c020a393182060eaefeeae6c01dab6a84ec346f2567df/rich-13.9.4-py3-none-any.whl +- pypi: https://files.pythonhosted.org/packages/0d/9b/63f4c7ebc259242c89b3acafdb37b41d1185c07ff0011164674e9076b491/rich-14.0.0-py3-none-any.whl name: rich - version: 13.9.4 - sha256: 6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90 + version: 14.0.0 + sha256: 1c9491e1951aac09caffd42f448ee3d04e58923ffe14993f6e83068dc395d7e0 requires_dist: - ipywidgets>=7.5.1,<9 ; extra == 'jupyter' - markdown-it-py>=2.2.0 diff --git a/scripts/gradio_demo.py b/scripts/gradio_demo.py index 36c61804..77c3fb8c 100644 --- a/scripts/gradio_demo.py +++ b/scripts/gradio_demo.py @@ -1,11 +1,14 @@ import colorsys import os +import argparse +import time import gradio as gr import numpy as np import onnxruntime as ort import pandas as pd from PIL import Image, ImageDraw +import cv2 # Use absolute paths instead of relative paths @@ -13,20 +16,6 @@ MODEL_PATH = os.path.join(BASE_DIR, "models/deim-blood-cell-detection_nano.onnx") CLASS_NAMES_PATH = os.path.join(BASE_DIR, "models/classes.txt") -def resize_with_aspect_ratio(image, size, interpolation=Image.BILINEAR): - """Resizes an image while maintaining aspect ratio and pads it.""" - original_width, original_height = image.size - ratio = min(size / original_width, size / original_height) - new_width = int(original_width * ratio) - new_height = int(original_height * ratio) - image = image.resize((new_width, new_height), interpolation) - - # Create a new image with the desired size and paste the resized image onto it - new_image = Image.new("RGB", (size, size)) - new_image.paste(image, ((size - new_width) // 2, (size - new_height) // 2)) - return new_image, ratio, (size - new_width) // 2, (size - new_height) // 2 - - def generate_colors(num_classes): """Generate a list of distinct colors for different classes.""" # Generate evenly spaced hues @@ -42,76 +31,78 @@ def generate_colors(num_classes): return colors -def draw(images, labels, boxes, scores, ratios, paddings, thrh=0.4, class_names=None): +def draw(images, labels, boxes, scores, scales, paddings, thrh=0.4, class_names=None): + """Draw detection boxes on images.""" result_images = [] - - # Generate colors for classes - num_classes = ( - len(class_names) if class_names else 91 - ) # Use length of class_names if available, otherwise default to COCO's 91 classes - colors = generate_colors(num_classes) + colors = generate_colors(len(class_names) if class_names else 91) for i, im in enumerate(images): - draw = ImageDraw.Draw(im) - scr = scores[i] - - # Get indices of scores above threshold - valid_indices = np.where(scr > thrh)[0] - - # Filter using these indices + # Convert PIL to numpy if needed + if isinstance(im, Image.Image): + im = np.array(im) + + # Filter detections by threshold + valid_indices = scores[i] > thrh valid_labels = labels[i][valid_indices] valid_boxes = boxes[i][valid_indices] - valid_scores = scr[valid_indices] - - for j, (lbl, bb, score) in enumerate(zip(valid_labels, valid_boxes, valid_scores)): - # Get color for this class - class_idx = int(lbl) - color = colors[class_idx % len(colors)] + valid_scores = scores[i][valid_indices] - # Convert RGB to hex for PIL - hex_color = "#{:02x}{:02x}{:02x}".format(*color) + # Scale boxes from padded size to original image size + scale = scales[i] + x_offset, y_offset = paddings[i] + + valid_boxes[:, [0, 2]] = (valid_boxes[:, [0, 2]] - x_offset) / scale # x coordinates + 
valid_boxes[:, [1, 3]] = (valid_boxes[:, [1, 3]] - y_offset) / scale # y coordinates - ratio = ratios[i] - pad_w, pad_h = paddings[i] + # Draw boxes + for label, box, score in zip(valid_labels, valid_boxes, valid_scores): + class_idx = int(label) + color = colors[class_idx % len(colors)] - # Adjust bounding boxes according to the resizing and padding - bb = [ - (bb[0] - pad_w) / ratio, - (bb[1] - pad_h) / ratio, - (bb[2] - pad_w) / ratio, - (bb[3] - pad_h) / ratio, - ] - - # Draw rectangle with class-specific color - draw.rectangle(bb, outline=hex_color, width=3) - - # Use class name if available, otherwise use class index + # Convert coordinates to integers + box = [int(coord) for coord in box] + + # Draw rectangle + cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]), color, 2) + + # Prepare label text if class_names and class_idx < len(class_names): label_text = f"{class_names[class_idx]} {score:.2f}" else: label_text = f"Class {class_idx} {score:.2f}" + # Get text size + (text_width, text_height), _ = cv2.getTextSize( + label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2 + ) + # Draw text background - text_size = draw.textbbox((0, 0), label_text, font=None) - text_width = text_size[2] - text_size[0] - text_height = text_size[3] - text_size[1] - - # Draw text background rectangle - draw.rectangle( - [bb[0], bb[1] - text_height - 4, bb[0] + text_width + 4, bb[1]], - fill=hex_color, + cv2.rectangle( + im, + (box[0], box[1] - text_height - 4), + (box[0] + text_width + 4, box[1]), + color, + -1, ) - # Draw text in white or black depending on color brightness - brightness = (color[0] * 299 + color[1] * 587 + color[2] * 114) / 1000 - text_color = "black" if brightness > 128 else "white" + # Calculate text color based on background brightness + brightness = sum(color) / 3 + text_color = (0, 0, 0) if brightness > 128 else (255, 255, 255) # Draw text - draw.text( - (bb[0] + 2, bb[1] - text_height - 2), text=label_text, fill=text_color + cv2.putText( + im, + label_text, + (box[0] + 2, box[1] - 2), + cv2.FONT_HERSHEY_SIMPLEX, + 0.7, + text_color, + 2, ) - result_images.append(im) + # Convert back to PIL Image + result_images.append(Image.fromarray(im)) + return result_images @@ -186,58 +177,67 @@ def load_class_names(class_names_path): def prepare_image(image, target_size=640): """ - Prepare image for inference by converting to PIL and resizing. + Prepare image for inference by converting to a numpy array and resizing with padding.
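+    Letterbox math, with illustrative numbers: a 1280x720 input and target_size=640
+    give scale = 640/1280 = 0.5, a 640x360 resize, and offsets x_offset=0,
+    y_offset=(640-360)//2 = 140; boxes are later mapped back via (coord - offset) / scale.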
Args: image: Input image (PIL or numpy array) target_size: Target size for resizing (default: 640) Returns: - tuple: (resized_image, original_image, ratio, padding) + tuple: (model_input, original_image, scale, padding) """ - # Convert to PIL image if needed - if not isinstance(image, Image.Image): - image = Image.fromarray(image).convert("RGB") + # Convert to numpy array if PIL Image + if isinstance(image, Image.Image): + image = np.array(image) + + # Calculate scaling and padding + height, width = image.shape[:2] + scale = target_size / max(height, width) + new_height = int(height * scale) + new_width = int(width * scale) + + # Calculate padding + y_offset = (target_size - new_height) // 2 + x_offset = (target_size - new_width) // 2 - # Resize image while preserving aspect ratio - resized_image, ratio, pad_w, pad_h = resize_with_aspect_ratio(image, target_size) + # Create model input with padding + model_input = np.zeros((target_size, target_size, 3), dtype=np.uint8) + model_input[y_offset:y_offset + new_height, x_offset:x_offset + new_width] = cv2.resize(image, (new_width, new_height)) - return resized_image, image, ratio, (pad_w, pad_h) + return model_input, image, scale, (x_offset, y_offset) -def run_inference(session, image): +def run_inference(session, image, target_size=640): """ Run inference on the prepared image. Args: session: ONNX runtime session - image: Prepared PIL image + image: Prepared image array + target_size: Target size used for padding Returns: tuple: (labels, boxes, scores) """ - # Check if image is None - if image is None: - raise ValueError("Input image is None") - - # Get original image dimensions - orig_height, orig_width = image.size[1], image.size[0] - # Convert to int64 as expected by the model - orig_size = np.array([[orig_height, orig_width]], dtype=np.int64) - - # Convert PIL image to numpy array and normalize to 0-1 range - im_data = np.array(image, dtype=np.float32) / 255.0 - # Transpose from HWC to CHW format - im_data = im_data.transpose(2, 0, 1) - # Add batch dimension - im_data = np.expand_dims(im_data, axis=0) - - output = session.run( + # Convert BGR to RGB for model input + image_rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + + # Prepare input data + im_data = np.ascontiguousarray( + image_rgb.transpose(2, 0, 1), # HWC to CHW format + dtype=np.float32, + ) + im_data = np.expand_dims(im_data, axis=0) # Add batch dimension + orig_size = np.array([[target_size, target_size]], dtype=np.int64) # Use padded size + + # Get input name and run inference + input_name = session.get_inputs()[0].name + outputs = session.run( output_names=None, - input_feed={"images": im_data, "orig_target_sizes": orig_size}, + input_feed={input_name: im_data, "orig_target_sizes": orig_size}, ) - return output # labels, boxes, scores + return outputs def count_objects(labels, scores, confidence_threshold, class_names): @@ -310,69 +310,63 @@ def create_bar_data(object_counts): def predict(image, model_path, class_names_path, confidence_threshold, image_size): - """ - Main prediction function that orchestrates the detection pipeline. 
- - Args: - image: Input image - model_path: Path to ONNX model - class_names_path: Path to class names file or list of class names - confidence_threshold: Detection confidence threshold - image_size: Size to resize the image to before inference - - Returns: - tuple: (result_image, status_message, bar_data) - """ - # Check if image is None + """Main prediction function.""" if image is None: return None, "Error: No image provided", None - + # Load model + model_load_start = time.time() session, error = load_model(model_path) + model_load_time = time.time() - model_load_start + if error: return None, error, None # Load class names class_names = load_class_names(class_names_path) - - # Debug print to verify class names are loaded correctly - print(f"Class names for detection: {class_names}") try: - # Prepare image with the selected size - resized_image, original_image, ratio, padding = prepare_image(image, image_size) + # Prepare image + preprocess_start = time.time() + model_input, original_image, scale, padding = prepare_image(image, image_size) + preprocess_time = time.time() - preprocess_start # Run inference - output = run_inference(session, resized_image) + inference_start = time.time() + outputs = run_inference(session, model_input, image_size) + inference_time = time.time() - inference_start - # Check if output is valid - if not output or len(output) < 3: + if not outputs or len(outputs) < 3: return None, "Error: Model output is invalid", None - labels, boxes, scores = output + labels, boxes, scores = outputs - # Draw detections on the original image + # Draw detections + postprocess_start = time.time() result_images = draw( [original_image], labels, boxes, scores, - [ratio], + [scale], [padding], thrh=confidence_threshold, class_names=class_names, ) - # Count objects by class + # Count objects and create visualizations object_counts = count_objects(labels, scores, confidence_threshold, class_names) - - # Debug print to verify object counts - print(f"Object counts: {object_counts}") + postprocess_time = time.time() - postprocess_start - # Create status message + # Create status message with timing information status_message = create_status_message(object_counts) - - # Create bar plot data + status_message += "\n\nLatency Information:" + status_message += f"\n- Model Loading: {model_load_time*1000:.1f}ms" + status_message += f"\n- Preprocessing: {preprocess_time*1000:.1f}ms" + status_message += f"\n- Inference: {inference_time*1000:.1f}ms" + status_message += f"\n- Postprocessing: {postprocess_time*1000:.1f}ms" + status_message += f"\n- Total Time: {(model_load_time + preprocess_time + inference_time + postprocess_time)*1000:.1f}ms" + bar_data = create_bar_data(object_counts) return result_images[0], status_message, bar_data @@ -400,7 +394,7 @@ def build_interface(model_path, class_names_path, example_images=None): gr.Markdown("Configure the model and run inference on an image.") # Add model selection - with gr.Accordion("Model Settings", open=True): + with gr.Accordion("Model Settings", open=False): with gr.Row(): custom_model_path = gr.File( label="Custom Model File (ONNX)", @@ -426,11 +420,13 @@ def build_interface(model_path, class_names_path, example_images=None): label="Confidence Threshold", ) - image_size = gr.Dropdown( - choices=[320, 480, 640, 800], + image_size = gr.Slider( + minimum=32, + maximum=1920, value=640, + step=32, label="Image Size", - info="Select image size for inference" + info="Select image size for inference (larger = slower but potentially more accurate)" ) 
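+                # Note: the 32 px step presumably keeps the input divisible by the
+                # backbone's largest feature stride; inference cost grows roughly
+                # quadratically with this value, so smaller sizes favour CPU-only use.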
submit_btn = gr.Button("Run Inference", variant="primary") @@ -495,12 +491,38 @@ def get_classes_path(custom_file, default_path): return demo +def parse_args(): + """Parse command line arguments.""" + parser = argparse.ArgumentParser(description='DEIMKit Detection Demo') + parser.add_argument( + '--model', + type=str, + default=MODEL_PATH, + help='Path to ONNX model file' + ) + parser.add_argument( + '--classes', + type=str, + default=CLASS_NAMES_PATH, + help='Path to class names file' + ) + parser.add_argument( + '--examples', + type=str, + default=os.path.join(BASE_DIR, "examples"), + help='Path to directory containing example images' + ) + return parser.parse_args() + + def launch_demo(): """ - Launch the Gradio demo with hardcoded model and class names paths. + Launch the Gradio demo with model and class names paths from command line arguments. """ + args = parse_args() + # Create examples directory if it doesn't exist - examples_dir = os.path.join(BASE_DIR, "examples") + examples_dir = args.examples if not os.path.exists(examples_dir): os.makedirs(examples_dir) print(f"Created examples directory at {examples_dir}") @@ -513,9 +535,9 @@ def launch_demo(): for f in os.listdir(examples_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg')) ] - print(f"Found {len(example_images)} example images") + print(f"Found {len(example_images)} example images in {examples_dir}") - demo = build_interface(MODEL_PATH, CLASS_NAMES_PATH, example_images) + demo = build_interface(args.model, args.classes, example_images) # Launch the demo without the examples parameter demo.launch(share=False, inbrowser=True) # Set share=True if you want to create a shareable link diff --git a/scripts/live_inference.py b/scripts/live_inference.py index f29c5477..3070b4f6 100644 --- a/scripts/live_inference.py +++ b/scripts/live_inference.py @@ -4,281 +4,396 @@ import cv2 import numpy as np import onnxruntime as ort -import torch # Do not remove this lf you are using the CUDA or TensortEP. 
Weird bug - https://github.com/microsoft/onnxruntime/issues/11092 -from PIL import Image, ImageDraw -from tqdm import tqdm - -def resize_with_aspect_ratio(image, size, interpolation=Image.BILINEAR): - """Resizes an image while maintaining aspect ratio and pads it.""" - original_width, original_height = image.size - ratio = min(size / original_width, size / original_height) - new_width = int(original_width * ratio) - new_height = int(original_height * ratio) - image = image.resize((new_width, new_height), interpolation) - - # Create a new image with the desired size and paste the resized image onto it - new_image = Image.new("RGB", (size, size)) - new_image.paste(image, ((size - new_width) // 2, (size - new_height) // 2)) - return new_image, ratio, (size - new_width) // 2, (size - new_height) // 2 +ort.preload_dlls() def generate_colors(num_classes): - """Generate a list of distinct colors for different classes.""" - # Generate evenly spaced hues + """Generate distinct colors for visualization.""" hsv_tuples = [(x / num_classes, 0.8, 0.9) for x in range(num_classes)] - - # Convert to RGB colors = [] for hsv in hsv_tuples: rgb = colorsys.hsv_to_rgb(*hsv) - # Convert to 0-255 range and to tuple colors.append(tuple(int(255 * x) for x in rgb)) - return colors -def draw(images, labels, boxes, scores, ratios, paddings, thrh=0.4, class_names=None): - result_images = [] - +def draw_boxes( + image, labels, boxes, scores, ratio, padding, threshold=0.3, class_names=None +): + """Draw bounding boxes on the image.""" # Generate colors for classes - num_classes = ( - len(class_names) if class_names else 91 - ) # Use length of class_names if available, otherwise default to COCO's 91 classes + num_classes = len(class_names) if class_names else 91 colors = generate_colors(num_classes) - for i, im in enumerate(images): - draw = ImageDraw.Draw(im) - scr = scores[i] - lab = labels[i][scr > thrh] - box = boxes[i][scr > thrh] - scr = scr[scr > thrh] - - ratio = ratios[i] - pad_w, pad_h = paddings[i] - - for lbl, bb in zip(lab, box): - # Get color for this class - class_idx = int(lbl) - color = colors[class_idx % len(colors)] - - # Convert RGB to hex for PIL - hex_color = "#{:02x}{:02x}{:02x}".format(*color) - - # Adjust bounding boxes according to the resizing and padding - bb = [ - (bb[0] - pad_w) / ratio, - (bb[1] - pad_h) / ratio, - (bb[2] - pad_w) / ratio, - (bb[3] - pad_h) / ratio, - ] - - # Draw rectangle with class-specific color - draw.rectangle(bb, outline=hex_color, width=3) - - # Use class name if available, otherwise use class index - if class_names and class_idx < len(class_names): - label_text = f"{class_names[class_idx]} {scr[lab == lbl][0]:.2f}" - else: - label_text = f"Class {class_idx} {scr[lab == lbl][0]:.2f}" - - # Draw text background - text_size = draw.textbbox((0, 0), label_text, font=None) - text_width = text_size[2] - text_size[0] - text_height = text_size[3] - text_size[1] - - # Draw text background rectangle - draw.rectangle( - [bb[0], bb[1] - text_height - 4, bb[0] + text_width + 4, bb[1]], - fill=hex_color, - ) + # Filter detections by threshold + valid_indices = scores > threshold + labels = labels[valid_indices] + boxes = boxes[valid_indices] + scores = scores[valid_indices] + + for j, (lbl, box, score) in enumerate(zip(labels, boxes, scores)): + # Get color for this class + class_idx = int(lbl) + color = colors[class_idx % len(colors)] + + # Use box coordinates directly + box_coords = [ + int(box[0]), # x1 + int(box[1]), # y1 + int(box[2]), # x2 + int(box[3]), # y2 + ] - # Draw 
text in white or black depending on color brightness - brightness = (color[0] * 299 + color[1] * 587 + color[2] * 114) / 1000 - text_color = "black" if brightness > 128 else "white" + # Draw rectangle + cv2.rectangle( + image, + (box_coords[0], box_coords[1]), + (box_coords[2], box_coords[3]), + color, + 2, + ) - # Draw text - draw.text( - (bb[0] + 2, bb[1] - text_height - 2), text=label_text, fill=text_color - ) + # Prepare label text + if class_names and class_idx < len(class_names): + label_text = f"{class_names[class_idx]} {score:.2f}" + else: + label_text = f"Class {class_idx} {score:.2f}" - result_images.append(im) - return result_images + # Get text size + (text_width, text_height), _ = cv2.getTextSize( + label_text, cv2.FONT_HERSHEY_SIMPLEX, 0.7, 2 + ) + # Draw text background + cv2.rectangle( + image, + (box_coords[0], box_coords[1] - text_height - 4), + (box_coords[0] + text_width + 4, box_coords[1]), + color, + -1, # Filled rectangle + ) -def process_image(sess, im_pil, class_names=None, input_size=640): - # Resize image while preserving aspect ratio - resized_im_pil, ratio, pad_w, pad_h = resize_with_aspect_ratio(im_pil, input_size) - orig_size = np.array( - [[resized_im_pil.size[1], resized_im_pil.size[0]]], dtype=np.int64 - ) + # Calculate text color based on background brightness + brightness = (color[0] * 299 + color[1] * 587 + color[2] * 114) / 1000 + text_color = (0, 0, 0) if brightness > 128 else (255, 255, 255) - # Convert PIL image to numpy array and normalize to 0-1 range - im_data = np.array(resized_im_pil, dtype=np.float32) / 255.0 + # Draw text + cv2.putText( + image, + label_text, + (box_coords[0] + 2, box_coords[1] - 2), + cv2.FONT_HERSHEY_SIMPLEX, + 0.7, + text_color, + 2, + ) - # Transpose from HWC to CHW format (height, width, channels) -> (channels, height, width) - im_data = im_data.transpose(2, 0, 1) + return image - # Add batch dimension - im_data = np.expand_dims(im_data, axis=0) - output = sess.run( - output_names=None, - input_feed={"images": im_data, "orig_target_sizes": orig_size}, - ) +def run_inference( + model_path, image_path, class_names_path=None, threshold=0.3, provider="cpu" +): + # Set up providers based on selection + if provider == "cpu": + providers = ["CPUExecutionProvider"] + elif provider == "cuda": + providers = [ + ( + "CUDAExecutionProvider", + { + "arena_extend_strategy": "kNextPowerOfTwo", + "gpu_mem_limit": 2 * 1024 * 1024 * 1024, + "cudnn_conv_algo_search": "EXHAUSTIVE", + "do_copy_in_default_stream": True, + }, + ), + "CPUExecutionProvider", + ] + elif provider == "tensorrt": + providers = [ + ( + "TensorrtExecutionProvider", + { + "trt_fp16_enable": True, + "trt_engine_cache_enable": True, + "trt_engine_cache_path": "./trt_cache", + "trt_timing_cache_enable": True, + }, + ), + "CPUExecutionProvider", + ] - labels, boxes, scores = output + try: + print(f"Loading ONNX model with providers: {providers}...") + session = ort.InferenceSession(model_path, providers=providers) + print(f"Using provider: {session.get_providers()[0]}") + except Exception as e: + print(f"Error creating inference session with providers {providers}: {e}") + print("Attempting to fall back to CPU execution...") + session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"]) - result_images = draw( - [im_pil], - labels, - boxes, - scores, - [ratio], - [(pad_w, pad_h)], - class_names=class_names, - ) - filename = "onnx_result.jpg" - result_images[0].save(filename) - print(f"Image processing complete. 
Result saved as '{filename}'.") + # Load class names if provided + class_names = None + if class_names_path: + try: + with open(class_names_path, "r") as f: + class_names = [line.strip() for line in f.readlines()] + print(f"Loaded {len(class_names)} class names") + except Exception as e: + print(f"Error loading class names: {e}") - image = cv2.imread(filename) - cv2.imshow("Image", image) - cv2.waitKey(0) - cv2.destroyAllWindows() + # Load image + image = cv2.imread(image_path) # Load as BGR + image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # Convert BGR to RGB + original_image = image.copy() + im_data = np.ascontiguousarray( + image.transpose(2, 0, 1), # HWC to CHW format + dtype=np.float32, + ) + im_data = np.expand_dims(im_data, axis=0) # Add batch dimension + orig_size = np.array([[image.shape[0], image.shape[1]]], dtype=np.int64) -def process_video(sess, video_path, class_names=None, input_size=640): - cap = cv2.VideoCapture(video_path) + print(f"Image frame shape: {image.shape}") + print(f"Processed input shape: {im_data.shape}") - # Get video properties - fps = cap.get(cv2.CAP_PROP_FPS) - orig_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) - orig_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) - total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + # Get input name from model metadata + input_name = session.get_inputs()[0].name - # Define the codec and create VideoWriter object - fourcc = cv2.VideoWriter_fourcc(*"mp4v") - out = cv2.VideoWriter("onnx_result.mp4", fourcc, fps, (orig_w, orig_h)) + # Run inference + outputs = session.run( + output_names=None, + input_feed={input_name: im_data, "orig_target_sizes": orig_size}, + ) - print("Processing video frames...") - progress_bar = tqdm(total=total_frames, desc="Processing frames", unit="frames") + # Process outputs + labels, boxes, scores = outputs - # Create a simple window for displaying the video - window_name = "Video Detection" - cv2.namedWindow(window_name, cv2.WINDOW_NORMAL) - cv2.resizeWindow(window_name, min(orig_w, 1280), min(orig_h, 720)) + # print(outputs) - # Variables for FPS calculation - prev_time = time.time() - curr_time = 0 - fps_display = 0 + # Draw bounding boxes on the image + result_image = draw_boxes( + original_image, + labels[0], + boxes[0], + scores[0], + 1.0, # No ratio needed since we're not resizing + (0, 0), # No padding needed + threshold=threshold, + class_names=class_names, + ) - # Add provider display flag and get actual provider name - show_provider = True - provider = sess.get_providers()[0] # Get the first active provider + # Save and show result + output_path = "detection_result.jpg" + result_bgr = cv2.cvtColor( + result_image, cv2.COLOR_RGB2BGR + ) # Convert back to BGR for OpenCV + cv2.imwrite(output_path, result_bgr) + print(f"Detection complete. 
Result saved to {output_path}") - while cap.isOpened(): - ret, frame = cap.read() - if not ret: - break + # Display the result + cv2.imshow("Detection Result", result_bgr) + cv2.waitKey(0) + cv2.destroyAllWindows() - # Calculate FPS - curr_time = time.time() - if curr_time - prev_time > 0: # Avoid division by zero - fps_display = 1 / (curr_time - prev_time) - prev_time = curr_time + return result_image - # Convert frame to PIL image - frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) - # Resize frame while preserving aspect ratio - resized_frame_pil, ratio, pad_w, pad_h = resize_with_aspect_ratio( - frame_pil, input_size - ) - orig_size = np.array( - [[resized_frame_pil.size[1], resized_frame_pil.size[0]]], dtype=np.int64 - ) +def run_inference_webcam( + model_path, class_names_path=None, provider="cpu", threshold=0.3, video_width=640 +): + """Run real-time object detection on webcam feed.""" + # Set up providers based on selection + if provider == "cpu": + providers = ["CPUExecutionProvider"] + elif provider == "cuda": + providers = [ + ( + "CUDAExecutionProvider", + { + "arena_extend_strategy": "kNextPowerOfTwo", + "gpu_mem_limit": 2 * 1024 * 1024 * 1024, + "cudnn_conv_algo_search": "EXHAUSTIVE", + "do_copy_in_default_stream": True, + }, + ), + "CPUExecutionProvider", + ] + elif provider == "tensorrt": + providers = [ + ( + "TensorrtExecutionProvider", + { + "trt_fp16_enable": False, + "trt_engine_cache_enable": True, + "trt_engine_cache_path": "./trt_cache", + "trt_timing_cache_enable": True, + }, + ), + "CPUExecutionProvider", + ] - # Convert PIL image to numpy array and normalize to 0-1 range - im_data = np.array(resized_frame_pil, dtype=np.float32) / 255.0 + try: + print(f"Loading ONNX model with providers: {providers}...") + session = ort.InferenceSession(model_path, providers=providers) + print(f"Using provider: {session.get_providers()[0]}") + except Exception as e: + print(f"Error creating inference session with providers {providers}: {e}") + print("Attempting to fall back to CPU execution...") + session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"]) - # Transpose from HWC to CHW format (height, width, channels) -> (channels, height, width) - im_data = im_data.transpose(2, 0, 1) + # Update FPS calculation variables + prev_time = time.time() + fps_display = 0 - # Add batch dimension - im_data = np.expand_dims(im_data, axis=0) + # Load class names if provided + class_names = None + if class_names_path: + try: + with open(class_names_path, "r") as f: + class_names = [line.strip() for line in f.readlines()] + print(f"Loaded {len(class_names)} class names") + except Exception as e: + print(f"Error loading class names: {e}") - output = sess.run( - output_names=None, - input_feed={"images": im_data, "orig_target_sizes": orig_size}, - ) + # Initialize webcam + cap = cv2.VideoCapture(0) + if not cap.isOpened(): + raise RuntimeError("Failed to open webcam") + + # Set camera to maximum possible FPS + cap.set( + cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc("M", "J", "P", "G") + ) # Use MJPG format for higher FPS + cap.set( + cv2.CAP_PROP_FPS, 1000 + ) # Request very high FPS - will default to max supported + cap.set(cv2.CAP_PROP_FRAME_WIDTH, video_width) + cap.set(cv2.CAP_PROP_FRAME_HEIGHT, int(video_width * 9 / 16)) # 16:9 aspect ratio + + # Print actual camera properties + actual_fps = cap.get(cv2.CAP_PROP_FPS) + actual_width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) + actual_height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + print( + f"Camera settings - FPS: 
{actual_fps}, Resolution: {actual_width}x{actual_height}" + ) - labels, boxes, scores = output - - # Draw detections on the original frame - result_images = draw( - [frame_pil], - labels, - boxes, - scores, - [ratio], - [(pad_w, pad_h)], - class_names=class_names, - ) - frame_with_detections = result_images[0] + try: + while True: + ret, frame = cap.read() + if not ret: + print("Failed to grab frame") + break + + # Calculate FPS + current_time = time.time() + if current_time - prev_time > 0: # Avoid division by zero + fps_display = 1 / (current_time - prev_time) + prev_time = current_time + + # Calculate scaling and padding + height, width = frame.shape[:2] + scale = 640.0 / max(height, width) + new_height = int(height * scale) + new_width = int(width * scale) + + # Calculate padding + y_offset = (640 - new_height) // 2 + x_offset = (640 - new_width) // 2 + + # Create model input with padding + model_input = np.zeros((640, 640, 3), dtype=np.uint8) + model_input[ + y_offset : y_offset + new_height, x_offset : x_offset + new_width + ] = cv2.resize(frame, (new_width, new_height)) + + # Convert BGR to RGB for model input + image = cv2.cvtColor(model_input, cv2.COLOR_BGR2RGB) + + # Prepare input data + im_data = np.ascontiguousarray( + image.transpose(2, 0, 1), + dtype=np.float32, + ) + im_data = np.expand_dims(im_data, axis=0) + orig_size = np.array([[640, 640]], dtype=np.int64) # Use padded size + + # Get input name and run inference + input_name = session.get_inputs()[0].name + outputs = session.run( + output_names=None, + input_feed={input_name: im_data, "orig_target_sizes": orig_size}, + ) - # Convert back to OpenCV image - display_frame = cv2.cvtColor(np.array(frame_with_detections), cv2.COLOR_RGB2BGR) + # Process outputs + labels, boxes, scores = outputs + + # Scale boxes from padded 640x640 to original frame size + boxes = boxes[0] # Remove batch dimension + boxes[:, [0, 2]] = (boxes[:, [0, 2]] - x_offset) / scale # x coordinates + boxes[:, [1, 3]] = (boxes[:, [1, 3]] - y_offset) / scale # y coordinates + + # Draw bounding boxes on the original frame + result_image = draw_boxes( + frame, # Use original frame + labels[0], + boxes, + scores[0], + 1.0, # No additional scaling needed + (0, 0), # No additional padding needed + threshold=threshold, + class_names=class_names, + ) - # Add FPS text to the top right corner with dark blue background - fps_text = f"FPS: {fps_display:.1f}" - text_size = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)[0] - text_x = display_frame.shape[1] - text_size[0] - 10 - text_y = 30 + # No need to convert back to BGR since we're using the original frame + result_bgr = result_image - # Draw background rectangle - cv2.rectangle( - display_frame, - (text_x - 5, text_y - text_size[1] - 5), - (text_x + text_size[0] + 5, text_y + 5), - (139, 0, 0), - -1, - ) # Dark blue background (BGR format) - - # Draw text in white - cv2.putText( - display_frame, - fps_text, - (text_x, text_y), - cv2.FONT_HERSHEY_SIMPLEX, - 0.8, - (255, 255, 255), - 2, - ) + # Add video width display at top left with dark green background + width_text = f"Width: {int(actual_width)}px" + text_size = cv2.getTextSize(width_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)[0] + + # Draw dark green background rectangle + cv2.rectangle( + result_bgr, + (5, 5), # Slight padding from corner + (text_size[0] + 15, 35), # Add padding around text + (0, 100, 0), # Dark green in BGR + -1, # Filled rectangle + ) - # Add provider text at the bottom center when show_provider is True - if show_provider: - 
provider_text = f"Provider: {provider}" - text_size = cv2.getTextSize( - provider_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2 - )[0] - text_x = (display_frame.shape[1] - text_size[0]) // 2 - text_y = display_frame.shape[0] - 20 + # Draw text + cv2.putText( + result_bgr, + width_text, + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 0.8, + (255, 255, 255), # White text + 2, + ) + + # Add FPS display (existing code) + fps_text = f"FPS: {fps_display:.1f}" + text_size = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)[0] + text_x = result_bgr.shape[1] - text_size[0] - 10 + text_y = 30 - # Draw background rectangle + # Draw FPS background rectangle cv2.rectangle( - display_frame, + result_bgr, (text_x - 5, text_y - text_size[1] - 5), (text_x + text_size[0] + 5, text_y + 5), (139, 0, 0), -1, - ) # Dark blue background + ) - # Draw text in white + # Draw FPS text cv2.putText( - display_frame, - provider_text, + result_bgr, + fps_text, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, @@ -286,148 +401,26 @@ def process_video(sess, video_path, class_names=None, input_size=640): 2, ) - # Display the frame in a clean window - cv2.imshow(window_name, display_frame) - - # Write the frame to output video - out.write(display_frame) - - # Update progress bar - progress_bar.update(1) - - # Toggle provider display on 'p' key press - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - print("\nProcessing interrupted by user") - break - elif key == ord("p"): - show_provider = not show_provider - - progress_bar.close() - cap.release() - out.release() - cv2.destroyAllWindows() - print("Video processing complete. Result saved as 'onnx_result.mp4'.") - - -def process_webcam(sess, device_id=0, class_names=None, input_size=640): - cap = cv2.VideoCapture(device_id) - - if not cap.isOpened(): - print(f"Error: Could not open webcam device {device_id}") - return - - print(f"Webcam opened successfully. 
Press 'q' to quit.") - - # Variables for FPS calculation - prev_time = 0 - curr_time = 0 - fps = 0 - - show_provider = True - provider = sess.get_providers()[0] - - while True: - # Calculate FPS - curr_time = time.time() - if curr_time - prev_time > 0: # Avoid division by zero - fps = 1 / (curr_time - prev_time) - prev_time = curr_time - - ret, frame = cap.read() - if not ret: - print("Error: Failed to capture frame from webcam") - break - - # Convert frame to PIL image - frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) - - # Resize frame while preserving aspect ratio - resized_frame_pil, ratio, pad_w, pad_h = resize_with_aspect_ratio( - frame_pil, input_size - ) - orig_size = np.array( - [[resized_frame_pil.size[1], resized_frame_pil.size[0]]], dtype=np.int64 - ) - - # Convert PIL image to numpy array and normalize to 0-1 range - im_data = np.array(resized_frame_pil, dtype=np.float32) / 255.0 - - # Transpose from HWC to CHW format (height, width, channels) -> (channels, height, width) - im_data = im_data.transpose(2, 0, 1) - - # Add batch dimension - im_data = np.expand_dims(im_data, axis=0) - - output = sess.run( - output_names=None, - input_feed={"images": im_data, "orig_target_sizes": orig_size}, - ) - - labels, boxes, scores = output - - # Draw detections on the original frame - result_images = draw( - [frame_pil], - labels, - boxes, - scores, - [ratio], - [(pad_w, pad_h)], - class_names=class_names, - ) - frame_with_detections = result_images[0] - - # Convert back to OpenCV image for display - display_frame = cv2.cvtColor(np.array(frame_with_detections), cv2.COLOR_RGB2BGR) - - # Add FPS text to the top right corner with dark blue background - fps_text = f"FPS: {fps:.1f}" - text_size = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)[0] - text_x = display_frame.shape[1] - text_size[0] - 10 - text_y = 30 - - # Draw background rectangle - cv2.rectangle( - display_frame, - (text_x - 5, text_y - text_size[1] - 5), - (text_x + text_size[0] + 5, text_y + 5), - (139, 0, 0), - -1, - ) # Dark blue background (BGR format) - - # Draw text in white - cv2.putText( - display_frame, - fps_text, - (text_x, text_y), - cv2.FONT_HERSHEY_SIMPLEX, - 0.8, - (255, 255, 255), - 2, - ) - - # Add provider text at the bottom center when show_provider is True - if show_provider: - provider_text = f"ONNX Runtime EP: {provider}" + # Add provider display + provider_text = f"Provider: {session.get_providers()[0]}" text_size = cv2.getTextSize( provider_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2 )[0] - text_x = (display_frame.shape[1] - text_size[0]) // 2 - text_y = display_frame.shape[0] - 20 + text_x = (result_bgr.shape[1] - text_size[0]) // 2 + text_y = result_bgr.shape[0] - 20 - # Draw background rectangle + # Draw provider background rectangle cv2.rectangle( - display_frame, + result_bgr, (text_x - 5, text_y - text_size[1] - 5), (text_x + text_size[0] + 5, text_y + 5), - (0, 0, 255), + (0, 0, 139), -1, - ) # Red background + ) - # Draw text in white + # Draw provider text cv2.putText( - display_frame, + result_bgr, provider_text, (text_x, text_y), cv2.FONT_HERSHEY_SIMPLEX, @@ -436,25 +429,32 @@ def process_webcam(sess, device_id=0, class_names=None, input_size=640): 2, ) - # Display the frame - cv2.imshow("Webcam Detection", display_frame) - - # Toggle provider display on 'p' key press - key = cv2.waitKey(1) & 0xFF - if key == ord("q"): - break - elif key == ord("p"): - show_provider = not show_provider - - cap.release() - cv2.destroyAllWindows() - print("Webcam processing stopped.") - - 
-def main(args): - if args.provider == "cpu": + # Display the result + cv2.imshow("Webcam Detection", result_bgr) + + # Handle key presses + key = cv2.waitKey(1) & 0xFF + if key == ord("q"): + break + + finally: + cap.release() + cv2.destroyAllWindows() + + +def run_inference_video( + model_path, + video_path, + class_names_path=None, + provider="cpu", + threshold=0.3, + video_width=640, +): + """Run object detection on a video file.""" + # Set up providers (same as webcam function) + if provider == "cpu": providers = ["CPUExecutionProvider"] - elif args.provider == "cuda": + elif provider == "cuda": providers = [ ( "CUDAExecutionProvider", @@ -467,7 +467,7 @@ def main(args): ), "CPUExecutionProvider", ] - elif args.provider == "tensorrt": + elif provider == "tensorrt": providers = [ ( "TensorrtExecutionProvider", @@ -481,73 +481,236 @@ def main(args): "CPUExecutionProvider", ] + # Initialize model session try: print(f"Loading ONNX model with providers: {providers}...") - sess_options = ort.SessionOptions() - - sess = ort.InferenceSession( - args.onnx, sess_options=sess_options, providers=providers - ) - print(f"Using provider: {sess.get_providers()[0]}") - + session = ort.InferenceSession(model_path, providers=providers) + print(f"Using provider: {session.get_providers()[0]}") except Exception as e: print(f"Error creating inference session with providers {providers}: {e}") print("Attempting to fall back to CPU execution...") - sess = ort.InferenceSession(args.onnx, providers=["CPUExecutionProvider"]) + session = ort.InferenceSession(model_path, providers=["CPUExecutionProvider"]) - # Load class names if provided + # Load class names class_names = None - if args.class_names: + if class_names_path: try: - with open(args.class_names, "r") as f: + with open(class_names_path, "r") as f: class_names = [line.strip() for line in f.readlines()] print(f"Loaded {len(class_names)} class names") except Exception as e: print(f"Error loading class names: {e}") - # Get input size from args - input_size = args.input_size + # Open video file + cap = cv2.VideoCapture(video_path) + if not cap.isOpened(): + raise RuntimeError(f"Failed to open video file: {video_path}") - if args.webcam: - # Process webcam feed - process_webcam(sess, args.device_id, class_names, input_size) - else: - input_path = args.input - try: - # Try to open the input as an image - im_pil = Image.open(input_path).convert("RGB") - process_image(sess, im_pil, class_names, input_size) - except IOError: - # Not an image, process as video - process_video(sess, input_path, class_names, input_size) + # Get video properties + fps = int(cap.get(cv2.CAP_PROP_FPS)) + frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) + frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) + total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + + # Calculate output dimensions based on video_width + scale = video_width / frame_width + output_width = video_width + output_height = int(frame_height * scale) + + # Create video writer with new dimensions + output_path = "detection_output.mp4" + fourcc = cv2.VideoWriter_fourcc(*"mp4v") + out = cv2.VideoWriter(output_path, fourcc, fps, (output_width, output_height)) + + # Initialize FPS calculation + prev_time = time.time() + fps_display = 0 + frame_count = 0 + + try: + while cap.isOpened(): + ret, frame = cap.read() + if not ret: + break + + frame_count += 1 + if frame_count % 10 == 0: + progress = (frame_count / total_frames) * 100 + print(f"Processing: {progress:.1f}% complete", end="\r") + + # Calculate FPS + 
current_time = time.time() + if current_time - prev_time > 0: + fps_display = 1 / (current_time - prev_time) + prev_time = current_time + + # Calculate scaling and padding using video_width parameter + height, width = frame.shape[:2] + scale = video_width / max(height, width) + new_height = int(height * scale) + new_width = int(width * scale) + + # Calculate padding + y_offset = (video_width - new_height) // 2 + x_offset = (video_width - new_width) // 2 + + # Create model input with padding using video_width + model_input = np.zeros((video_width, video_width, 3), dtype=np.uint8) + model_input[ + y_offset : y_offset + new_height, x_offset : x_offset + new_width + ] = cv2.resize(frame, (new_width, new_height)) + + # Convert BGR to RGB for model input + image = cv2.cvtColor(model_input, cv2.COLOR_BGR2RGB) + + # Prepare input data + im_data = np.ascontiguousarray( + image.transpose(2, 0, 1), + dtype=np.float32, + ) + im_data = np.expand_dims(im_data, axis=0) + orig_size = np.array( + [[video_width, video_width]], dtype=np.int64 + ) # Use padded size + + # Run inference + input_name = session.get_inputs()[0].name + outputs = session.run( + output_names=None, + input_feed={input_name: im_data, "orig_target_sizes": orig_size}, + ) + + # Process outputs + labels, boxes, scores = outputs + + # Scale boxes from padded 640x640 to original frame size + boxes = boxes[0] # Remove batch dimension + boxes[:, [0, 2]] = (boxes[:, [0, 2]] - x_offset) / scale # x coordinates + boxes[:, [1, 3]] = (boxes[:, [1, 3]] - y_offset) / scale # y coordinates + + # Draw bounding boxes on the original frame + result_image = draw_boxes( + frame, # Use original frame + labels[0], + boxes, + scores[0], + 1.0, # No additional scaling needed + (0, 0), # No additional padding needed + threshold=threshold, + class_names=class_names, + ) + + # Before writing the frame, resize it + result_image = cv2.resize(result_image, (output_width, output_height)) + out.write(result_image) + + # Add video width display at top left with dark green background + width_text = f"Width: {output_width}px" + text_size = cv2.getTextSize(width_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)[0] + + # Draw dark green background rectangle + cv2.rectangle( + result_image, + (5, 5), # Slight padding from corner + (text_size[0] + 15, 35), # Add padding around text + (0, 100, 0), # Dark green in BGR + -1, # Filled rectangle + ) + + # Draw text + cv2.putText( + result_image, + width_text, + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 0.8, + (255, 255, 255), # White text + 2, + ) + + # Add FPS counter and provider info (existing code) + fps_text = f"FPS: {fps_display:.1f}" + text_size = cv2.getTextSize(fps_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2)[0] + text_x = result_image.shape[1] - text_size[0] - 10 + text_y = 30 + + # Draw FPS background rectangle + cv2.rectangle( + result_image, + (text_x - 5, text_y - text_size[1] - 5), + (text_x + text_size[0] + 5, text_y + 5), + (139, 0, 0), + -1, + ) + + # Draw FPS text + cv2.putText( + result_image, + fps_text, + (text_x, text_y), + cv2.FONT_HERSHEY_SIMPLEX, + 0.8, + (255, 255, 255), + 2, + ) + + # Add provider display at bottom (matching webcam style) + provider_text = f"Provider: {session.get_providers()[0]}" + text_size = cv2.getTextSize( + provider_text, cv2.FONT_HERSHEY_SIMPLEX, 0.8, 2 + )[0] + text_x = (result_image.shape[1] - text_size[0]) // 2 + text_y = result_image.shape[0] - 20 + + # Draw provider background rectangle + cv2.rectangle( + result_image, + (text_x - 5, text_y - text_size[1] - 5), + (text_x + text_size[0] + 
5, text_y + 5), + (139, 0, 0), + -1, + ) + + # Draw provider text + cv2.putText( + result_image, + provider_text, + (text_x, text_y), + cv2.FONT_HERSHEY_SIMPLEX, + 0.8, + (255, 255, 255), + 2, + ) + + # Display frame (optional) + cv2.imshow("Video Detection", result_image) + if cv2.waitKey(1) & 0xFF == ord("q"): + break + + finally: + cap.release() + out.release() + cv2.destroyAllWindows() + print(f"\nVideo processing complete. Output saved to {output_path}") if __name__ == "__main__": import argparse - parser = argparse.ArgumentParser() - parser.add_argument( - "--onnx", type=str, required=True, help="Path to the ONNX model file." - ) - parser.add_argument( - "--input", type=str, help="Path to the input image or video file." - ) - parser.add_argument( - "--webcam", action="store_true", help="Use webcam as input source" - ) + parser = argparse.ArgumentParser(description="Simple ONNX object detection") parser.add_argument( - "--device-id", type=int, default=0, help="Webcam device ID (default: 0)" + "--model", type=str, required=True, help="Path to ONNX model file" ) + parser.add_argument("--image", type=str, help="Path to input image (optional)") + parser.add_argument("--webcam", action="store_true", help="Use webcam input") parser.add_argument( - "--class-names", - type=str, - help="Path to a text file with class names (one per line)", - ) - parser.add_argument( - "--input-size", + "--video-width", type=int, default=640, - help="Input image size for the model (default: 640)", + help="Width of the video input in pixels (default: 640). Height will be adjusted to maintain aspect ratio", + ) + parser.add_argument( + "--classes", type=str, help="Path to class names file (optional)" ) parser.add_argument( "--provider", @@ -556,9 +719,32 @@ def main(args): default="cpu", help="ONNXRuntime provider to use for inference", ) - args = parser.parse_args() + parser.add_argument( + "--threshold", + type=float, + default=0.3, + help="Detection confidence threshold (default: 0.3)", + ) + parser.add_argument("--video", type=str, help="Path to input video file (optional)") - if not args.webcam and not args.input: - parser.error("Either --input or --webcam must be specified") + args = parser.parse_args() - main(args) + if args.webcam: + run_inference_webcam( + args.model, args.classes, args.provider, args.threshold, args.video_width + ) + elif args.video: + run_inference_video( + args.model, + args.video, + args.classes, + args.provider, + args.threshold, + args.video_width, + ) + elif args.image: + run_inference( + args.model, args.image, args.classes, args.threshold, args.provider + ) + else: + parser.error("Either --image, --video, or --webcam must be specified") diff --git a/scripts/quickstart.py b/scripts/quickstart.py index ce6984eb..b1ad5eb7 100644 --- a/scripts/quickstart.py +++ b/scripts/quickstart.py @@ -71,3 +71,30 @@ def download_and_unzip(url, extract_to="./"): logger.info("Testing predictions...") predictions = model.predict("./data/coco8-converted/000000000009.jpg") + +logger.info("Exporting pretrained model to ONNX...") + +coco_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', + 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', + 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', + 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 
'cup', 'fork', + 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', + 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' +] + +model = load_model("deim_hgnetv2_n", class_names=coco_classes) +model.cfg.save("./checkpoints/deim_hgnetv2_n.yml") + +from deimkit.exporter import Exporter +from deimkit.config import Config + +config = Config("./checkpoints/deim_hgnetv2_n.yml") +exporter = Exporter(config) + +output_path = exporter.to_onnx( + checkpoint_path="./checkpoints/deim_hgnetv2_n.pth", + output_path="./checkpoints/deim_hgnetv2_n.onnx" +) + +logger.info(f"ONNX model saved to {output_path}") diff --git a/scripts/train.py b/scripts/train.py index 46fc62a3..ace183b9 100644 --- a/scripts/train.py +++ b/scripts/train.py @@ -1,22 +1,23 @@ -from deimkit import Trainer, Config, configure_dataset, configure_model +from deimkit import Config, Trainer, configure_dataset, configure_model conf = Config.from_model_name("deim_hgnetv2_s") -conf = configure_model(conf, num_queries=100) +conf = configure_model(conf, num_queries=100, freeze_at=0, pretrained=True) conf = configure_dataset( config=conf, image_size=(640, 640), - train_ann_file="/home/dnth/Desktop/DEIMKit/dataset_collections/aquarium-combined-gjvb.v1i.coco/train/_annotations.coco.json", - train_img_folder="/home/dnth/Desktop/DEIMKit/dataset_collections/aquarium-combined-gjvb.v1i.coco/train", - val_ann_file="/home/dnth/Desktop/DEIMKit/dataset_collections/aquarium-combined-gjvb.v1i.coco/valid/_annotations.coco.json", - val_img_folder="/home/dnth/Desktop/DEIMKit/dataset_collections/aquarium-combined-gjvb.v1i.coco/valid", - train_batch_size=16, - val_batch_size=16, - num_classes=8, - output_dir="./outputs/aquarium/deim_hgnetv2_s_30ep_640px_num_queries_100_no_aug_epoch_15", + train_ann_file="/home/dnth/Desktop/DEIMKit/dataset_collections/Rock Paper Scissors SXSW.v14i.coco/train/_annotations.coco.json", + train_img_folder="/home/dnth/Desktop/DEIMKit/dataset_collections/Rock Paper Scissors SXSW.v14i.coco/train", + val_ann_file="/home/dnth/Desktop/DEIMKit/dataset_collections/Rock Paper Scissors SXSW.v14i.coco/valid/_annotations.coco.json", + val_img_folder="/home/dnth/Desktop/DEIMKit/dataset_collections/Rock Paper Scissors SXSW.v14i.coco/valid", + train_batch_size=20, + val_batch_size=20, + num_classes=4, + remap_mscoco=False, + output_dir="./outputs/rock-paper-scissors/deim_hgnetv2_s_30ep_640px_num_queries_pinto", ) trainer = Trainer(conf) -trainer.fit(epochs=30, save_best_only=True, no_aug_epoch=15) +trainer.fit(epochs=30, save_best_only=True) diff --git a/src/deimkit/configs/deim_dfine/deim_hgnetv2_n_coco.yml b/src/deimkit/configs/deim_dfine/deim_hgnetv2_n_coco.yml index 62db245d..a5c265a5 100644 --- a/src/deimkit/configs/deim_dfine/deim_hgnetv2_n_coco.yml +++ b/src/deimkit/configs/deim_dfine/deim_hgnetv2_n_coco.yml @@ -27,7 +27,7 @@ optimizer: epoches: 160 # 148 + 12 ## Our LR-Scheduler -flat_epoch: 7800 # 4 + epoch // 2, e.g., 40 = 4 + 72 / 2 +flat_epoch: 78 # 4 + epoch // 2, e.g., 40 = 4 + 72 / 2 no_aug_epoch: 12 lr_gamma: 1.0 diff --git a/src/deimkit/engine/data/dataset/coco_eval.py b/src/deimkit/engine/data/dataset/coco_eval.py index 75f6bd8d..a5fd366b 100644 --- a/src/deimkit/engine/data/dataset/coco_eval.py +++ b/src/deimkit/engine/data/dataset/coco_eval.py @@ 
-96,13 +96,19 @@ def prepare_for_coco_detection(self, predictions): boxes = prediction["boxes"] boxes = convert_to_xywh(boxes).tolist() scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() + labels = prediction["labels"] + + # Handle different label formats + if isinstance(labels, torch.Tensor): + labels = labels.flatten().tolist() + elif isinstance(labels, list) and isinstance(labels[0], list): + labels = [l[0] if isinstance(l, list) else l for l in labels] coco_results.extend( [ { "image_id": original_id, - "category_id": labels[k], + "category_id": int(labels[k]), "bbox": box, "score": scores[k], } diff --git a/src/deimkit/engine/deim/box_ops.py b/src/deimkit/engine/deim/box_ops.py index ede1b324..44bae964 100644 --- a/src/deimkit/engine/deim/box_ops.py +++ b/src/deimkit/engine/deim/box_ops.py @@ -16,10 +16,13 @@ def box_cxcywh_to_xyxy(x): def box_xyxy_to_cxcywh(x: Tensor) -> Tensor: - x0, y0, x1, y1 = x.unbind(-1) + x0 = x[..., 0:1] + y0 = x[..., 1:2] + x1 = x[..., 2:3] + y1 = x[..., 3:4] b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] - return torch.stack(b, dim=-1) + return torch.cat(b, dim=-1) # modified from torchvision to also return the union diff --git a/src/deimkit/engine/deim/dfine_decoder.py b/src/deimkit/engine/deim/dfine_decoder.py index c791d8b9..9741b3a6 100644 --- a/src/deimkit/engine/deim/dfine_decoder.py +++ b/src/deimkit/engine/deim/dfine_decoder.py @@ -261,11 +261,11 @@ def __init__(self, reg_max=32): self.reg_max = reg_max def forward(self, x, project): - shape = x.shape - x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1) - x = F.linear(x, project.to(x.device)).reshape(-1, 4) - return x.reshape(list(shape[:-1]) + [-1]) - + n,c,h = x.shape + b = n*c*h // (self.reg_max + 1) + x = F.softmax(x.reshape([b, self.reg_max + 1]), dim=1) + x = F.linear(x, project.to(x.device)) + return x.reshape([n,c,4]) class LQE(nn.Module): def __init__(self, k, hidden_dim, num_layers, reg_max, act='relu'): @@ -393,8 +393,10 @@ def forward(self, ref_points_detach = inter_ref_bbox.detach() output_detach = output.detach() - return torch.stack(dec_out_bboxes), torch.stack(dec_out_logits), \ - torch.stack(dec_out_pred_corners), torch.stack(dec_out_refs), pre_bboxes, pre_scores + if len(dec_out_logits) > 1: + return torch.stack(dec_out_bboxes), torch.stack(dec_out_logits), torch.stack(dec_out_pred_corners), torch.stack(dec_out_refs), pre_bboxes, pre_scores + else: + return dec_out_bboxes[0], dec_out_logits[0], dec_out_pred_corners[0], dec_out_refs[0], pre_bboxes, pre_scores @register() @@ -593,7 +595,8 @@ def _get_encoder_input(self, feats: List[torch.Tensor]): for i, feat in enumerate(proj_feats): _, _, h, w = feat.shape # [b, c, h, w] -> [b, h*w, c] - feat_flatten.append(feat.flatten(2).permute(0, 2, 1)) + n,c,h,w = feat.shape + feat_flatten.append(feat.reshape([n,c,h*w]).permute(0, 2, 1)) # [num_levels, 2] spatial_shapes.append([h, w]) @@ -625,7 +628,6 @@ def _generate_anchors(self, valid_mask = ((anchors > self.eps) * (anchors < 1 - self.eps)).all(-1, keepdim=True) anchors = torch.log(anchors / (1 - anchors)) anchors = torch.where(valid_mask, anchors, torch.inf) - return anchors, valid_mask @@ -691,14 +693,11 @@ def _select_topk(self, memory: torch.Tensor, outputs_logits: torch.Tensor, outpu topk_ind: torch.Tensor - topk_anchors = outputs_anchors_unact.gather(dim=1, \ - index=topk_ind.unsqueeze(-1).repeat(1, 1, outputs_anchors_unact.shape[-1])) - - topk_logits = outputs_logits.gather(dim=1, \ - index=topk_ind.unsqueeze(-1).repeat(1, 1, 
outputs_logits.shape[-1])) if self.training else None - - topk_memory = memory.gather(dim=1, \ - index=topk_ind.unsqueeze(-1).repeat(1, 1, memory.shape[-1])) + n = topk_ind.shape[0] + outputs_anchors_unact = outputs_anchors_unact * torch.ones([n, 1, 1], device=outputs_anchors_unact.device) + topk_anchors = outputs_anchors_unact.gather(dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, outputs_anchors_unact.shape[-1])) + topk_logits = outputs_logits.gather(dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, outputs_logits.shape[-1])) if self.training else None + topk_memory = memory.gather(dim=1, index=topk_ind.unsqueeze(-1).repeat(1, 1, memory.shape[-1])) return topk_memory, topk_logits, topk_anchors @@ -752,21 +751,21 @@ def forward(self, feats, targets=None): if self.training: - out = {'pred_logits': out_logits[-1], 'pred_boxes': out_bboxes[-1], 'pred_corners': out_corners[-1], - 'ref_points': out_refs[-1], 'up': self.up, 'reg_scale': self.reg_scale} + out = {'pred_logits': out_logits[-1], 'pred_boxes': out_bboxes[-1], 'pred_corners': out_corners[-1], 'ref_points': out_refs[-1], 'up': self.up, 'reg_scale': self.reg_scale} else: - out = {'pred_logits': out_logits[-1], 'pred_boxes': out_bboxes[-1]} + if len(out_logits.shape) == 4: + out = {'pred_logits': out_logits[-1], 'pred_boxes': out_bboxes[-1]} + else: + out = {'pred_logits': out_logits, 'pred_boxes': out_bboxes} if self.training and self.aux_loss: - out['aux_outputs'] = self._set_aux_loss2(out_logits[:-1], out_bboxes[:-1], out_corners[:-1], out_refs[:-1], - out_corners[-1], out_logits[-1]) + out['aux_outputs'] = self._set_aux_loss2(out_logits[:-1], out_bboxes[:-1], out_corners[:-1], out_refs[:-1], out_corners[-1], out_logits[-1]) out['enc_aux_outputs'] = self._set_aux_loss(enc_topk_logits_list, enc_topk_bboxes_list) out['pre_outputs'] = {'pred_logits': pre_logits, 'pred_boxes': pre_bboxes} out['enc_meta'] = {'class_agnostic': self.query_select_method == 'agnostic'} if dn_meta is not None: - out['dn_outputs'] = self._set_aux_loss2(dn_out_logits, dn_out_bboxes, dn_out_corners, dn_out_refs, - dn_out_corners[-1], dn_out_logits[-1]) + out['dn_outputs'] = self._set_aux_loss2(dn_out_logits, dn_out_bboxes, dn_out_corners, dn_out_refs, dn_out_corners[-1], dn_out_logits[-1]) out['dn_pre_outputs'] = {'pred_logits': dn_pre_logits, 'pred_boxes': dn_pre_bboxes} out['dn_meta'] = dn_meta @@ -789,4 +788,4 @@ def _set_aux_loss2(self, outputs_class, outputs_coord, outputs_corners, outputs_ # as a dict having both a Tensor and a list. return [{'pred_logits': a, 'pred_boxes': b, 'pred_corners': c, 'ref_points': d, 'teacher_corners': teacher_corners, 'teacher_logits': teacher_logits} - for a, b, c, d in zip(outputs_class, outputs_coord, outputs_corners, outputs_ref)] + for a, b, c, d in zip(outputs_class, outputs_coord, outputs_corners, outputs_ref)] \ No newline at end of file diff --git a/src/deimkit/engine/deim/dfine_utils.py b/src/deimkit/engine/deim/dfine_utils.py index c0864e20..a54d88ad 100644 --- a/src/deimkit/engine/deim/dfine_utils.py +++ b/src/deimkit/engine/deim/dfine_utils.py @@ -119,13 +119,11 @@ def distance2bbox(points, distance, reg_scale): Tensor: Bounding boxes in (N, 4) or (B, N, 4) format [cx, cy, w, h]. 
""" reg_scale = abs(reg_scale) - x1 = points[..., 0] - (0.5 * reg_scale + distance[..., 0]) * (points[..., 2] / reg_scale) - y1 = points[..., 1] - (0.5 * reg_scale + distance[..., 1]) * (points[..., 3] / reg_scale) - x2 = points[..., 0] + (0.5 * reg_scale + distance[..., 2]) * (points[..., 2] / reg_scale) - y2 = points[..., 1] + (0.5 * reg_scale + distance[..., 3]) * (points[..., 3] / reg_scale) - - bboxes = torch.stack([x1, y1, x2, y2], -1) - + x1 = points[..., 0:1] - (0.5 * reg_scale + distance[..., 0:1]) * (points[..., 2:3] / reg_scale) + y1 = points[..., 1:2] - (0.5 * reg_scale + distance[..., 1:2]) * (points[..., 3:4] / reg_scale) + x2 = points[..., 0:1] + (0.5 * reg_scale + distance[..., 2:3]) * (points[..., 2:3] / reg_scale) + y2 = points[..., 1:2] + (0.5 * reg_scale + distance[..., 3:4]) * (points[..., 3:4] / reg_scale) + bboxes = torch.cat([x1, y1, x2, y2], -1) return box_xyxy_to_cxcywh(bboxes) @@ -153,4 +151,4 @@ def bbox2distance(points, bbox, reg_max, reg_scale, up, eps=0.1): four_lens, weight_right, weight_left = translate_gt(four_lens, reg_max, reg_scale, up) if reg_max is not None: four_lens = four_lens.clamp(min=0, max=reg_max-eps) - return four_lens.reshape(-1).detach(), weight_right.detach(), weight_left.detach() + return four_lens.reshape(-1).detach(), weight_right.detach(), weight_left.detach() \ No newline at end of file diff --git a/src/deimkit/engine/deim/hybrid_encoder.py b/src/deimkit/engine/deim/hybrid_encoder.py index 14b49159..5ed36724 100644 --- a/src/deimkit/engine/deim/hybrid_encoder.py +++ b/src/deimkit/engine/deim/hybrid_encoder.py @@ -401,7 +401,8 @@ def forward(self, feats): for i, enc_ind in enumerate(self.use_encoder_idx): h, w = proj_feats[enc_ind].shape[2:] # flatten [B, C, H, W] to [B, HxW, C] - src_flatten = proj_feats[enc_ind].flatten(2).permute(0, 2, 1) + n,c,h,w = proj_feats[enc_ind].shape + src_flatten = proj_feats[enc_ind].reshape([n,c,h*w]).permute(0, 2, 1) if self.training or self.eval_spatial_size is None: pos_embed = self.build_2d_sincos_position_embedding( w, h, self.hidden_dim, self.pe_temperature).to(src_flatten.device) @@ -430,4 +431,4 @@ def forward(self, feats): out = self.pan_blocks[idx](torch.concat([downsample_feat, feat_height], dim=1)) outs.append(out) - return outs + return outs \ No newline at end of file diff --git a/src/deimkit/engine/deim/postprocessor.py b/src/deimkit/engine/deim/postprocessor.py index ff66842b..dee4c0a5 100644 --- a/src/deimkit/engine/deim/postprocessor.py +++ b/src/deimkit/engine/deim/postprocessor.py @@ -47,21 +47,23 @@ def extra_repr(self) -> str: return f'use_focal_loss={self.use_focal_loss}, num_classes={self.num_classes}, num_top_queries={self.num_top_queries}' # def forward(self, outputs, orig_target_sizes): - def forward(self, outputs, orig_target_sizes: torch.Tensor): + def forward(self, outputs, orig_target_sizes: torch.Tensor=None): logits, boxes = outputs['pred_logits'], outputs['pred_boxes'] # orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) bbox_pred = torchvision.ops.box_convert(boxes, in_fmt='cxcywh', out_fmt='xyxy') - bbox_pred *= orig_target_sizes.repeat(1, 2).unsqueeze(1) + if orig_target_sizes is not None: + bbox_pred *= orig_target_sizes.repeat(1, 2).unsqueeze(1) if self.use_focal_loss: scores = F.sigmoid(logits) scores, index = torch.topk(scores.flatten(1), self.num_top_queries, dim=-1) - # TODO for older tensorrt - # labels = index % self.num_classes + if orig_target_sizes is None: + scores = scores.unsqueeze(-1) + index = index.unsqueeze(-1) labels = 
mod(index, self.num_classes) index = index // self.num_classes - boxes = bbox_pred.gather(dim=1, index=index.unsqueeze(-1).repeat(1, 1, bbox_pred.shape[-1])) + if index.dim() == 2: + index = index.unsqueeze(-1) + boxes = bbox_pred.gather(dim=1, index=index.repeat(1, 1, bbox_pred.shape[-1])) else: scores = F.softmax(logits)[:, :, :-1] @@ -73,7 +75,10 @@ def forward(self, outputs, orig_target_sizes: torch.Tensor): # TODO for onnx export if self.deploy_mode: - return labels, boxes, scores + if orig_target_sizes is not None: + return labels, boxes, scores + else: + return torch.cat([labels, boxes, scores], dim=2) # TODO if self.remap_mscoco_category: @@ -83,6 +88,8 @@ def forward(self, outputs, orig_target_sizes: torch.Tensor): results = [] for lab, box, sco in zip(labels, boxes, scores): + # Ensure labels are flattened single values + lab = lab.flatten() if isinstance(lab, torch.Tensor) else lab result = dict(labels=lab, boxes=box, scores=sco) results.append(result) @@ -92,4 +99,4 @@ def forward(self, outputs, orig_target_sizes: torch.Tensor): def deploy(self, ): self.eval() self.deploy_mode = True - return self + return self \ No newline at end of file diff --git a/src/deimkit/exporter.py b/src/deimkit/exporter.py index adbfebf7..8a4fdb63 100644 --- a/src/deimkit/exporter.py +++ b/src/deimkit/exporter.py @@ -1,8 +1,9 @@ import os import torch import torch.nn as nn +import torch.nn.functional as F from loguru import logger -from typing import Any, Dict +from typing import Any, Dict, Optional, Tuple class Exporter: @@ -10,7 +11,7 @@ class Exporter: """ Export a DEIM model to ONNX format. This class provides functionality to export trained DEIM models to ONNX format - for deployment in production environments. + for deployment in production environments, optionally including preprocessing steps. """ def __init__(self, config: Any): @@ -22,14 +23,65 @@ def __init__(self, config: Any): """ self.config = config + class PreprocessingModule(nn.Module): + """Handles image preprocessing: resize, BGR->RGB, normalize.""" + def __init__(self, target_height: int, target_width: int): + super().__init__() + self.target_height = target_height + self.target_width = target_width + logger.info( + f"Initialized PreprocessingModule with target size: " + f"({target_height}, {target_width})" + ) + + def forward(self, input_bgr: torch.Tensor) -> torch.Tensor: + """ + Apply preprocessing steps. + + Args: + input_bgr: Input tensor in BGR format (N, 3, H, W). + + Returns: + Preprocessed tensor in RGB format, normalized, and resized. + """ + # 1. Resize + x = F.interpolate( + input=input_bgr, + size=(self.target_height, self.target_width), + mode='bilinear', # Common interpolation mode, adjust if needed + align_corners=False # Common practice + ) + logger.debug(f"Preprocessing: Resized shape: {x.shape}") + + # 2. BGR -> RGB + # Ensure input has 3 channels + if x.shape[1] != 3: + raise ValueError(f"Input tensor must have 3 channels (BGR), got {x.shape[1]}") + # Swap channels: (B:0, G:1, R:2) -> (R:2, G:1, B:0) + x = x[:, [2, 1, 0], :, :] + logger.debug("Preprocessing: Swapped BGR to RGB") + + # 3. 
Normalize (0-255 -> 0-1) + # Input arrives as float BGR with values in [0, 255] (F.interpolate above requires float); scale to [0, 1] + x = x * (1.0 / 255.0) + logger.debug("Preprocessing: Normalized pixel values to [0, 1]") + + return x + def to_onnx( self, checkpoint_path: str, output_path: str | None = None, - input_shape: tuple[int, int, int, int] | None = None, + input_shape: Optional[Tuple[int, int, int, int]] = None, check: bool = True, simplify: bool = True, - ) -> None: + dynamic_batch: bool = True, + dynamic_input_size: bool = True, + include_preprocessing: bool = True, + fp16: bool = False, + opset_version: int = 20, + device: Optional[str] = None, + ) -> str: """ Export a model to ONNX format from a checkpoint file. @@ -37,16 +89,42 @@ def to_onnx( checkpoint_path: Path to the checkpoint file (.pth) output_path: Path for the ONNX model (defaults to the checkpoint filename with a .onnx extension, plus suffixes when dynamic batch or FP16 are enabled, written to the current directory) input_shape: Shape of the input tensor (batch_size, channels, height, width). + If `include_preprocessing` is True, this defines the *target* size + for the internal resize operation. Otherwise, it's the direct model input size. If None, will be determined from the config. check: Whether to validate the exported ONNX model simplify: Whether to simplify the exported ONNX model + dynamic_batch: Whether to allow dynamic batch size (N) in the exported model. + dynamic_input_size: Whether to allow dynamic input height (H) and width (W) + if `include_preprocessing` is True. + include_preprocessing: If True, include resize, BGR->RGB, and normalization + steps in the exported ONNX graph. The input will + expect raw BGR images. + fp16: Whether to export the model in FP16 precision (requires CUDA). + opset_version: The ONNX opset version to use for export. + device: The device to use for export ('cpu' or 'cuda'). Auto-selected if None. Returns: Path to the exported ONNX model """ + # Determine device + _device_str = device if device else ("cuda" if fp16 else "cpu") + if fp16 and _device_str == "cpu": + logger.warning("FP16 export requested but device is CPU. Switching to CUDA.") + _device_str = "cuda" + if _device_str == "cuda" and not torch.cuda.is_available(): + logger.warning("CUDA device requested but not available. 
Switching to CPU.") + _device_str = "cpu" + if fp16: + logger.warning("FP16 export disabled as CUDA is not available.") + fp16 = False + + _device = torch.device(_device_str) + logger.info(f"Using device: {_device}") + # Load checkpoint logger.info(f"Loading checkpoint from {checkpoint_path}") - checkpoint = torch.load(checkpoint_path, map_location="cpu") + checkpoint = torch.load(checkpoint_path, map_location=_device) # Extract state dict if "ema" in checkpoint and "module" in checkpoint["ema"]: @@ -54,7 +132,10 @@ def to_onnx( state_dict = checkpoint["ema"]["module"] else: logger.info("EMA weights not found, using regular model weights") - state_dict = checkpoint.get("model") + state_dict = checkpoint.get("model", checkpoint.get("state_dict")) + if state_dict is None: + logger.error("Could not find model state_dict in checkpoint.") + raise KeyError("Checkpoint does not contain 'model' or 'state_dict' key.") # Load state dict into model self.config.model.load_state_dict(state_dict) @@ -65,22 +146,30 @@ def to_onnx( # Determine output path if not provided if output_path is None: - output_path = checkpoint_path.replace(".pth", ".onnx") - - # Create wrapper model - wrapper_model = self._create_wrapper_model(model, postprocessor) + base_name = os.path.splitext(os.path.basename(checkpoint_path))[0] + suffix = "" + if dynamic_batch: + suffix += "_n_batch" + if fp16: + suffix += "_fp16" + output_path = f"{base_name}{suffix}.onnx" + else: + os.makedirs(os.path.dirname(os.path.abspath(output_path)), exist_ok=True) # Determine input shape from config if not provided + target_height, target_width = None, None if input_shape is None: logger.info("Input shape not provided, getting size from config") - # Get base size from config base_size = self.config.get( "yaml_cfg.train_dataloader.collate_fn.base_size", None + ) or self.config.get( + "yaml_cfg.val_dataloader.collate_fn.base_size", None ) if base_size is None: logger.warning( - "Base size not found in config. Please specify input_shape explicitly." + "Base size not found in config (checked train/val dataloader.collate_fn.base_size)." + " Please specify input_shape explicitly." ) raise ValueError( "Could not determine input shape from config. Please provide input_shape parameter." 
@@ -88,97 +177,264 @@ if isinstance(base_size, (list, tuple)) and len(base_size) == 2: height, width = base_size - else: + elif isinstance(base_size, int): height, width = base_size, base_size + else: + logger.error(f"Unexpected base_size format in config: {base_size}") + raise ValueError("Invalid base_size format in config.") - # Default to 3 channels (RGB) and batch size of 1 - input_shape = (1, 3, height, width) - logger.info(f"Using input shape from config: {input_shape}") - - # Create dummy inputs - dummy_data = torch.rand(*input_shape) - dummy_size = torch.tensor([[input_shape[2], input_shape[3]]]) - - # Ensure output directory exists - os.makedirs(os.path.dirname(os.path.abspath(output_path)), exist_ok=True) + target_height, target_width = height, width + input_shape = (1, 3, target_height, target_width) + logger.info(f"Using target shape from config: {input_shape}") + else: + # If input_shape is provided, extract target height/width + _, _, target_height, target_width = input_shape + logger.info(f"Using provided target shape: {(target_height, target_width)}") + + # Create preprocessing module if requested + preprocessing_module = None + if include_preprocessing: + if target_height is None or target_width is None: + raise ValueError("Cannot include preprocessing without a defined target height/width. " + "Provide input_shape or ensure base_size is in config.") + preprocessing_module = self.PreprocessingModule(target_height, target_width) + logger.info("Including preprocessing steps in the ONNX model.") + + # Create wrapper model and move to device + wrapper_model = self._create_wrapper_model( + model, postprocessor, preprocessing_module + ).to(_device) + wrapper_model.eval() + + # Create dummy inputs and move to device + dummy_data_shape = input_shape + dummy_data = torch.rand(*dummy_data_shape, device=_device) + + # The 'orig_target_sizes' input typically corresponds to the size *before* padding/resizing + # If preprocessing is included, this might represent the original image size. + # If not, it represents the size the postprocessor needs. + # For export, we use the target H/W here. The actual value depends on runtime usage. + dummy_size_h, dummy_size_w = target_height, target_width + # If include_preprocessing and dynamic input size, the actual input H/W can vary. + # For export dummy data, we still need concrete values. 
+ dummy_size = torch.tensor([[dummy_size_h, dummy_size_w]], device=_device) + if not dynamic_batch and dummy_data_shape[0] > 1: + dummy_size = dummy_size.repeat(dummy_data_shape[0], 1) + + # Define input/output names based on whether preprocessing is included + input_names = ["images", "orig_target_sizes"] + if include_preprocessing: + input_names[0] = "input_bgr" + + output_names = ["labels", "boxes", "scores"] # Define dynamic axes - dynamic_axes = {"images": {0: "N"}, "orig_target_sizes": {0: "N"}} - + _dynamic_axes = {} + first_input_name = input_names[0] + _dynamic_axes[first_input_name] = {} + _dynamic_axes["orig_target_sizes"] = {} + _dynamic_axes["labels"] = {} + _dynamic_axes["boxes"] = {} + _dynamic_axes["scores"] = {} + + if dynamic_batch: + _dynamic_axes[first_input_name][0] = "N" + _dynamic_axes["orig_target_sizes"][0] = "N" + _dynamic_axes["labels"][0] = "N" + _dynamic_axes["boxes"][0] = "N" + _dynamic_axes["scores"][0] = "N" + + # Add dynamic H/W for the input if preprocessing is enabled and requested + if include_preprocessing and dynamic_input_size: + _dynamic_axes[first_input_name][2] = "H" + _dynamic_axes[first_input_name][3] = "W" + # Note: 'orig_target_sizes' might also need dynamic axes depending on usage, + # but typically it relates to the *original* size before preprocessing. + # Keeping it simple here unless specific needs arise. + + # Remove empty dicts if no dynamic axes are specified for a name + _dynamic_axes = {k: v for k, v in _dynamic_axes.items() if v} + if not _dynamic_axes: + _dynamic_axes = None + + logger.info(f"Using input names: {input_names}") + logger.info(f"Using output names: {output_names}") + logger.info(f"Using dynamic axes: {_dynamic_axes}") logger.info(f"Exporting model to ONNX: {output_path}") try: - # Export to ONNX - torch.onnx.export( - wrapper_model, - (dummy_data, dummy_size), - output_path, - input_names=["images", "orig_target_sizes"], - output_names=["labels", "boxes", "scores"], - dynamic_axes=dynamic_axes, - opset_version=20, - do_constant_folding=True, - ) + # Export to ONNX with FP16 context if enabled + export_kwargs = { + "model": wrapper_model, + "args": (dummy_data, dummy_size), + "f": output_path, + "input_names": input_names, + "output_names": output_names, + "dynamic_axes": _dynamic_axes, + "opset_version": opset_version, + "do_constant_folding": True, + } + + if fp16: + with torch.autocast(device_type=_device_str, dtype=torch.float16): + _ = wrapper_model(dummy_data, dummy_size) + torch.onnx.export(**export_kwargs) + else: + torch.onnx.export(**export_kwargs) - logger.success("ONNX export completed successfully") + logger.success(f"ONNX export completed successfully: {output_path}") # Validate and simplify if requested - if check: - self._check_onnx_model(output_path) + final_output_path = output_path if simplify: - self._simplify_onnx_model( + # Input shapes for simplification should match the dummy data used for export + input_shapes_for_sim = { + input_names[0]: dummy_data.shape, + input_names[1]: dummy_size.shape, + } + logger.info(f"Simplifying ONNX model with input shapes: {input_shapes_for_sim}") + simplified_path = self._simplify_onnx_model( output_path, - {"images": dummy_data.shape, "orig_target_sizes": dummy_size.shape}, + input_shapes=input_shapes_for_sim, + target_path=output_path, ) + if simplified_path: + final_output_path = simplified_path + + if check: + self._check_onnx_model(final_output_path) + + return final_output_path except Exception as e: - logger.error(f"ONNX export failed: {str(e)}") + 
logger.error(f"ONNX export failed: {str(e)}", exc_info=True) + if os.path.exists(output_path): + try: + os.remove(output_path) + logger.info(f"Removed partially exported file: {output_path}") + except OSError as remove_err: + logger.warning(f"Failed to remove partial file {output_path}: {remove_err}") raise RuntimeError(f"Failed to export model to ONNX: {str(e)}") from e def _create_wrapper_model( - self, model: nn.Module, postprocessor: nn.Module + self, + model: nn.Module, + postprocessor: nn.Module, + preprocessing: Optional[nn.Module] = None ) -> nn.Module: - """Create a wrapper model that includes both model and postprocessor.""" + """ + Create a wrapper model that includes optional preprocessing, the main model, + and the postprocessor. + """ class WrappedModel(nn.Module): - def __init__(self, model: nn.Module, postprocessor: nn.Module): + def __init__( + self, + model: nn.Module, + postprocessor: nn.Module, + preprocessing: Optional[nn.Module] = None + ): super().__init__() + self.preprocessing = preprocessing self.model = model self.postprocessor = postprocessor def forward(self, images: torch.Tensor, orig_target_sizes: torch.Tensor): - outputs = self.model(images) + # Apply preprocessing if it exists + if self.preprocessing: + x = self.preprocessing(images) + else: + x = images + + # Pass preprocessed data to the main model + outputs = self.model(x) + + # Pass model outputs and original sizes to postprocessor return self.postprocessor(outputs, orig_target_sizes) - return WrappedModel(model, postprocessor) + return WrappedModel(model, postprocessor, preprocessing) def _check_onnx_model(self, model_path: str) -> None: """Check if the exported ONNX model is valid.""" + if not os.path.exists(model_path): + logger.error(f"Cannot check ONNX model: File not found at {model_path}") + return try: import onnx onnx_model = onnx.load(model_path) onnx.checker.check_model(onnx_model) - logger.info("ONNX model validation successful") + logger.info(f"ONNX model validation successful: {model_path}") except ImportError: - logger.warning("ONNX validation skipped: onnx package not installed") + logger.warning("ONNX validation skipped: 'onnx' package not installed") + except Exception as e: + logger.error(f"ONNX model validation failed for {model_path}: {str(e)}", exc_info=True) def _simplify_onnx_model( - self, model_path: str, input_shapes: dict[str, tuple] - ) -> None: - """Simplify the exported ONNX model.""" + self, + model_path: str, + input_shapes: dict[str, tuple], + target_path: Optional[str] = None, + ) -> Optional[str]: + """ + Simplify the exported ONNX model using onnxsim. + + Args: + model_path: Path to the input ONNX model. + input_shapes: Dictionary mapping input names (e.g., 'input_bgr', 'orig_target_sizes') + to their concrete shapes for simplification. + target_path: Path to save the simplified model. If None, saves inplace. + + Returns: + Path to the simplified model, or None if simplification failed. 
+ """ + if not os.path.exists(model_path): + logger.error(f"Cannot simplify ONNX model: File not found at {model_path}") + return None + if target_path is None: + target_path = model_path try: import onnx import onnxsim + logger.info(f"Simplifying ONNX model: {model_path} -> {target_path}") onnx_model_simplify, check = onnxsim.simplify( - model_path, test_input_shapes=input_shapes + model_path, + test_input_shapes=input_shapes, + perform_optimization=True, + skip_fuse_bn=False, ) - onnx.save(onnx_model_simplify, model_path) - status = "successful" if check else "failed" - logger.info(f"ONNX model simplification {status}") + + if check: + onnx.save(onnx_model_simplify, target_path) + logger.success(f"ONNX model simplification successful: {target_path}") + return target_path + else: + logger.error(f"ONNX model simplification check failed for: {model_path}") + if model_path != target_path and os.path.exists(model_path): + import shutil + try: + shutil.copyfile(model_path, target_path) + logger.warning(f"Saved original (unsimplified) model to {target_path} due to check failure.") + return target_path + except Exception as copy_e: + logger.error(f"Failed to copy original model {model_path} to {target_path}: {copy_e}") + return None + except ImportError: logger.warning( - "ONNX simplification skipped: required packages not installed" + "ONNX simplification skipped: 'onnx' or 'onnxsim' package not installed" ) + return None + except Exception as e: + logger.error(f"ONNX model simplification failed for {model_path}: {str(e)}", exc_info=True) + if model_path != target_path and os.path.exists(model_path): + import shutil + try: + shutil.copyfile(model_path, target_path) + logger.warning(f"Saved original model to {target_path} due to simplification error.") + return target_path + except Exception as copy_e: + logger.error(f"Failed to copy original model {model_path} to {target_path}: {copy_e}") + return None diff --git a/src/deimkit/predictor.py b/src/deimkit/predictor.py index 6026c55c..95b5549a 100644 --- a/src/deimkit/predictor.py +++ b/src/deimkit/predictor.py @@ -23,7 +23,7 @@ "deim_hgnetv2_x": "1dPtbgtGgq1Oa7k_LgH1GXPelg1IVeu0j", # XLarge model } -DEFAULT_CACHE_DIR = os.path.expanduser("~/.cache/deim/checkpoints") +DEFAULT_CACHE_DIR = os.path.expanduser("./checkpoints") DEFAULT_IMAGE_SIZE = (640, 640) DEFAULT_MEAN = [0.485, 0.456, 0.406] DEFAULT_STD = [0.229, 0.224, 0.225]