Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -5,3 +5,4 @@
*.pth
*.pt
*.dat
.envrc
4 changes: 2 additions & 2 deletions preprocessing/config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -24,10 +24,10 @@ rearrange:
default: 'FaceForensics++'
dataset_root_path: # the root path to the dataset
type: str
default: ''
default: 'datasets/rgb'
output_file_path: # the json path to the dataset
type: str
default: '../preprocessing/dataset_json_v6'
default: 'datasets/json'
comp: # the compression level of videos, only in the dataset of FaceForensics++.
choices: ['raw', 'c23', 'c40']
default: 'c23'
Expand Down
2 changes: 1 addition & 1 deletion preprocessing/rearrange.py
Original file line number Diff line number Diff line change
Expand Up @@ -500,7 +500,7 @@ def generate_dataset_file(dataset_name, dataset_root_path, output_file_path, com

if __name__ == '__main__':
# from config.yaml load parameters
yaml_path = './config.yaml'
yaml_path = './preprocessing/config.yaml'
# open the yaml file
try:
with open(yaml_path, 'r') as f:
Expand Down
44 changes: 44 additions & 0 deletions pyproject.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
[project]
name = "deepfakebench"
version = "0.1.0"
description = "DeepfakeBench: a comprehensive benchmark for deepfake detection"
readme = "README.md"
requires-python = ">=3.10"
dependencies = [
"albumentations>=2.0.8",
"clip",
"dlib>=20.0.0",
"efficientnet-pytorch>=0.7.1",
"einops>=0.8.1",
"filterpy>=1.4.5",
"fvcore>=0.1.5.post20221221",
"imageio>=2.37.0",
"imgaug>=0.4.0",
"imutils>=0.5.4",
"kornia>=0.8.1",
"loralib>=0.1.2",
"numpy<2.0",
"opencv-python>=4.11.0.86",
"pandas>=2.3.0",
"pillow>=11.2.1",
"pyyaml>=6.0.2",
"requests>=2.25.0",
"scikit-image>=0.25.2",
"scikit-learn>=1.7.0",
"scipy>=1.15.3",
"seaborn>=0.13.2",
"segmentation-models-pytorch>=0.5.0",
"setuptools>=80.9.0",
"simplejson>=3.20.1",
"tensorboard>=2.19.0",
"timm>=1.0.16",
"torch>=2.7.1",
"torchaudio>=2.7.1",
"torchtoolbox>=0.1.8.2",
"torchvision>=0.22.1",
"tqdm>=4.67.1",
"transformers>=4.53.0",
]

[tool.uv.sources]
clip = { git = "https://github.com/openai/CLIP.git" }
6 changes: 3 additions & 3 deletions training/config/train_config.yaml
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
mode: train
lmdb: True
lmdb: False
dry_run: false
rgb_dir: './datasets/rgb'
rgb_dir: '.'
lmdb_dir: './datasets/lmdb'
dataset_json_folder: './preprocessing/dataset_json'
dataset_json_folder: './datasets/json'
SWA: False
save_avg: True
log_dir: ./logs/training/
Expand Down
22 changes: 15 additions & 7 deletions training/dataset/abstract_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
import yaml
import glob
import json
from pathlib import Path

import numpy as np
from copy import deepcopy
Expand Down Expand Up @@ -115,11 +116,11 @@ def init_data_aug_method(self):
A.HorizontalFlip(p=self.config['data_aug']['flip_prob']),
A.Rotate(limit=self.config['data_aug']['rotate_limit'], p=self.config['data_aug']['rotate_prob']),
A.GaussianBlur(blur_limit=self.config['data_aug']['blur_limit'], p=self.config['data_aug']['blur_prob']),
A.OneOf([
IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_LINEAR),
IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_LINEAR, interpolation_up=cv2.INTER_LINEAR),
], p = 0 if self.config['with_landmark'] else 1),
# A.OneOf([
# IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_CUBIC),
# IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_AREA, interpolation_up=cv2.INTER_LINEAR),
# IsotropicResize(max_side=self.config['resolution'], interpolation_down=cv2.INTER_LINEAR, interpolation_up=cv2.INTER_LINEAR),
# ], p = 0 if self.config['with_landmark'] else 1),
A.OneOf([
A.RandomBrightnessContrast(brightness_limit=self.config['data_aug']['brightness_limit'], contrast_limit=self.config['data_aug']['contrast_limit']),
A.FancyPCA(),
Expand Down Expand Up @@ -296,7 +297,7 @@ def load_rgb(self, file_path):
size = self.config['resolution'] # if self.mode == "train" else self.config['resolution']
if not self.lmdb:
if not file_path[0] == '.':
file_path = f'./{self.config["rgb_dir"]}\\'+file_path
file_path = f'./{self.config["rgb_dir"]}/'+file_path
assert os.path.exists(file_path), f"{file_path} does not exist"
img = cv2.imread(file_path)
if img is None:
Expand Down Expand Up @@ -469,14 +470,21 @@ def __getitem__(self, index, no_norm=False):
label = self.data_dict['label'][index]

if not isinstance(image_paths, list):
image_paths = [image_paths] # for the image-level IO, only one frame is used
image_paths = [Path(image_paths)] # for the image-level IO, only one frame is used

image_paths = [Path(path) for path in image_paths] # Sanitize image paths

image_tensors = []
landmark_tensors = []
mask_tensors = []
augmentation_seed = None

for image_path in image_paths:
# check if this is a valid path
if not image_path.exists():
raise ValueError(f"Image path {image_path.absolute()} does not exist.")

image_path = str(image_path)
# Initialize a new seed for data augmentation at the start of each video
if self.video_level and image_path == image_paths[0]:
augmentation_seed = random.randint(0, 2**32 - 1)
Expand Down
2 changes: 1 addition & 1 deletion training/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@

parser = argparse.ArgumentParser(description='Process some paths.')
parser.add_argument('--detector_path', type=str,
default='/home/zhiyuanyan/DeepfakeBench/training/config/detector/resnet34.yaml',
default='config/detector/resnet34.yaml',
help='path to detector YAML file')
parser.add_argument("--test_dataset", nargs="+")
parser.add_argument('--weights_path', type=str,
Expand Down
2 changes: 1 addition & 1 deletion training/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@

parser = argparse.ArgumentParser(description='Process some paths.')
parser.add_argument('--detector_path', type=str,
default='/data/home/zhiyuanyan/DeepfakeBenchv2/training/config/detector/sbi.yaml',
default='training/config/detector/xception.yaml',
help='path to detector YAML file')
parser.add_argument("--train_dataset", nargs="+")
parser.add_argument("--test_dataset", nargs="+")
Expand Down
Loading