Commit 3bf7228
changed the dataset order
UTKARSH OJHA committed Aug 18, 2023
1 parent ce1d948 · commit 3bf7228
Showing 3 changed files with 88 additions and 92 deletions.
162 changes: 77 additions & 85 deletions dataset_paths.py
@@ -5,161 +5,153 @@
# = = = = = = = = = = = = = = LDM = = = = = = = = = = = = = = = = #

    dict(
-        real_path='../laion400m_data/val.pickle',
-        fake_path='../FAKE_IMAGES/ldm_200step/val.pickle',
-        data_mode='ours',
+        real_path='../FAKE_IMAGES/CNN/test/progan',
+        fake_path='../FAKE_IMAGES/CNN/test/progan',
+        data_mode='wang2020',
+        key='progan'
    ),

    dict(
-        real_path='../laion400m_data/val.pickle',
-        fake_path='../FAKE_IMAGES/ldm_200step_cfg3/val.pickle',
-        data_mode='ours',
+        real_path='../FAKE_IMAGES/CNN/test/cyclegan',
+        fake_path='../FAKE_IMAGES/CNN/test/cyclegan',
+        data_mode='wang2020',
+        key='cyclegan'
    ),


# = = = = = = = = = = = = = = GLIDE = = = = = = = = = = = = = = = = #

    dict(
-        real_path='../laion400m_data/val.pickle',
-        fake_path='../FAKE_IMAGES/glide_100ddpm_27ddim/val.pickle',
-        data_mode='ours',
+        real_path='../FAKE_IMAGES/CNN/test/biggan/',   # Imagenet
+        fake_path='../FAKE_IMAGES/CNN/test/biggan/',
+        data_mode='wang2020',
+        key='biggan'
    ),

-    dict(
-        real_path='../laion400m_data/val.pickle',
-        fake_path='../FAKE_IMAGES/glide_75ddpm_27ddim/val.pickle',
-        data_mode='ours',
-    ),

    dict(
-        real_path='../laion400m_data/val.pickle',
-        fake_path='../FAKE_IMAGES/glide_50ddpm_27ddim/val.pickle',
-        data_mode='ours',
+        real_path='../FAKE_IMAGES/CNN/test/stylegan',
+        fake_path='../FAKE_IMAGES/CNN/test/stylegan',
+        data_mode='wang2020',
+        key='stylegan'
    ),

-    dict(
-        real_path='../laion400m_data/val.pickle',
-        fake_path='../FAKE_IMAGES/glide_50ddim_27ddim/val.pickle',
-        data_mode='ours',
-    ),

    dict(
-        real_path='../laion400m_data/val.pickle',
-        fake_path='../FAKE_IMAGES/glide_100ddpm_10ddim/val.pickle',
-        data_mode='ours',
+        real_path='../FAKE_IMAGES/CNN/test/gaugan',   # It is COCO
+        fake_path='../FAKE_IMAGES/CNN/test/gaugan',
+        data_mode='wang2020',
+        key='gaugan'
    ),


# = = = = = = = = = = = = = = GUIDED = = = = = = = = = = = = = = = = #

    dict(
-        real_path='../imagenet/val.pickle',
-        fake_path='../FAKE_IMAGES/guided_imagenet_ddim25_cg1/val.pickle',
-        data_mode='ours',
+        real_path='../FAKE_IMAGES/CNN/test/stargan',
+        fake_path='../FAKE_IMAGES/CNN/test/stargan',
+        data_mode='wang2020',
+        key='stargan'
    ),


# = = = = = = = = = = = = = = DALLE-MINI = = = = = = = = = = = = = = = = #

    dict(
-        real_path='../laion400m_data/val.pickle',
-        fake_path='../FAKE_IMAGES/DALLE-MINI',
-        data_mode='ours',
+        real_path='../FAKE_IMAGES/CNN/test/deepfake',
+        fake_path='../FAKE_IMAGES/CNN/test/deepfake',
+        data_mode='wang2020',
+        key='deepfake'
    ),


# = = = = = = = = = = = = = = CNN = = = = = = = = = = = = = = = = #

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/biggan/',   # Imagenet
-        fake_path='../FAKE_IMAGES/CNN/test/biggan/',
+        real_path='../FAKE_IMAGES/CNN/test/seeingdark',
+        fake_path='../FAKE_IMAGES/CNN/test/seeingdark',
        data_mode='wang2020',
+        key='sitd'
    ),

-    dict(
-        real_path='../FAKE_IMAGES/CNN/test/cyclegan',
-        fake_path='../FAKE_IMAGES/CNN/test/cyclegan',
-        data_mode='wang2020',
-    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/gaugan',   # It is COCO
-        fake_path='../FAKE_IMAGES/CNN/test/gaugan',
+        real_path='../FAKE_IMAGES/CNN/test/san',
+        fake_path='../FAKE_IMAGES/CNN/test/san',
        data_mode='wang2020',
+        key='san'
    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/progan',
-        fake_path='../FAKE_IMAGES/CNN/test/progan',
+        real_path='../FAKE_IMAGES/CNN/test/crn',   # Images from some video games
+        fake_path='../FAKE_IMAGES/CNN/test/crn',
        data_mode='wang2020',
+        key='crn'
    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/stylegan',
-        fake_path='../FAKE_IMAGES/CNN/test/stylegan',
+        real_path='../FAKE_IMAGES/CNN/test/imle',   # Images from some video games
+        fake_path='../FAKE_IMAGES/CNN/test/imle',
        data_mode='wang2020',
+        key='imle'
    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/whichfaceisreal',   # It is FFHQ
-        fake_path='../FAKE_IMAGES/CNN/test/whichfaceisreal',
-        data_mode='wang2020',
+        real_path='../imagenet/val.pickle',
+        fake_path='../FAKE_IMAGES/guided_imagenet_ddim25_cg1/val.pickle',
+        data_mode='ours',
+        key='guided'
    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/crn',   # Images from some video games
-        fake_path='../FAKE_IMAGES/CNN/test/crn',
-        data_mode='wang2020',
+        real_path='../laion400m_data/val.pickle',
+        fake_path='../FAKE_IMAGES/ldm_200step/val.pickle',
+        data_mode='ours',
+        key='ldm_200'
    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/imle',   # Images from some video games
-        fake_path='../FAKE_IMAGES/CNN/test/imle',
-        data_mode='wang2020',
+        real_path='../laion400m_data/val.pickle',
+        fake_path='../FAKE_IMAGES/ldm_200step_cfg3/val.pickle',
+        data_mode='ours',
+        key='ldm_200_cfg'
    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/stargan',
-        fake_path='../FAKE_IMAGES/CNN/test/stargan',
-        data_mode='wang2020',
-    ),
+        real_path='../laion400m_data/val.pickle',
+        fake_path='../FAKE_IMAGES/ldm_100step/val.pickle',
+        data_mode='ours',
+        key='ldm_100'
+    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/stylegan2',
-        fake_path='../FAKE_IMAGES/CNN/test/stylegan2',
-        data_mode='wang2020',
+        real_path='../laion400m_data/val.pickle',
+        fake_path='../FAKE_IMAGES/glide_100ddpm_27ddim/val.pickle',
+        data_mode='ours',
+        key='glide_100_27'
    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/deepfake',
-        fake_path='../FAKE_IMAGES/CNN/test/deepfake',
-        data_mode='wang2020',
+        real_path='../laion400m_data/val.pickle',
+        fake_path='../FAKE_IMAGES/glide_50ddpm_27ddim/val.pickle',
+        data_mode='ours',
+        key='glide_50_27'
    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/san',
-        fake_path='../FAKE_IMAGES/CNN/test/san',
-        data_mode='wang2020',
+        real_path='../laion400m_data/val.pickle',
+        fake_path='../FAKE_IMAGES/glide_100ddpm_10ddim/val.pickle',
+        data_mode='ours',
+        key='glide_100_10'
    ),

    dict(
-        real_path='../FAKE_IMAGES/CNN/test/seeingdark',
-        fake_path='../FAKE_IMAGES/CNN/test/seeingdark',
-        data_mode='wang2020',
+        real_path='../laion400m_data/val.pickle',
+        fake_path='../FAKE_IMAGES/DALLE-MINI',
+        data_mode='ours',
+        key='dalle'
    ),

]
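
Taken together, the added lines give the new evaluation order: the CNN-generated (wang2020) test sets come first, followed by the diffusion and autoregressive (data_mode='ours') sets, and every entry now carries a key that names it in the result files. A condensed sketch of how dataset_paths.py reads after this commit; the list variable is assumed to be named DATASET_PATHS, and only a few representative entries are spelled out:

# Sketch, not the full file: the remaining entries follow the same pattern.
DATASET_PATHS = [
    dict(
        real_path='../FAKE_IMAGES/CNN/test/progan',
        fake_path='../FAKE_IMAGES/CNN/test/progan',
        data_mode='wang2020',
        key='progan'
    ),
    dict(
        real_path='../FAKE_IMAGES/CNN/test/cyclegan',
        fake_path='../FAKE_IMAGES/CNN/test/cyclegan',
        data_mode='wang2020',
        key='cyclegan'
    ),
    # ... biggan, stylegan, gaugan, stargan, deepfake, sitd, san, crn, imle ...
    dict(
        real_path='../imagenet/val.pickle',
        fake_path='../FAKE_IMAGES/guided_imagenet_ddim25_cg1/val.pickle',
        data_mode='ours',
        key='guided'
    ),
    # ... ldm_200, ldm_200_cfg, ldm_100, glide_100_27, glide_50_27, glide_100_10, dalle ...
]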
1 change: 1 addition & 0 deletions test.sh
@@ -0,0 +1 @@
+CUDA_VISIBLE_DEVICES=0 python3 validate.py --arch=CLIP:ViT-L/14 --ckpt=pretrained_weights/fc_weights.pth --result_folder=clip_vitl14
17 changes: 10 additions & 7 deletions validate.py
@@ -8,7 +8,7 @@
import numpy as np
from sklearn.metrics import average_precision_score, precision_recall_curve, accuracy_score
from torch.utils.data import Dataset
-
+import sys
from models import get_model
from PIL import Image
import pickle
@@ -98,11 +98,13 @@ def validate(model, loader, find_thres=False):

    with torch.no_grad():
        y_true, y_pred = [], []
+        print ("Length of dataset: %d" %(len(loader)))
        for img, label in loader:
            in_tens = img.cuda()

            y_pred.extend(model(in_tens).sigmoid().flatten().tolist())
            y_true.extend(label.flatten().tolist())

    y_true, y_pred = np.array(y_true), np.array(y_pred)

    # ================== save this if you want to plot the curves =========== #
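
The remainder of validate() (not shown in this hunk) turns y_true and y_pred into the AP and accuracy numbers that get written to the result files further down; the repository also searches for a best threshold (best_thres) when find_thres=True. As a rough, illustrative sketch of the fixed-threshold part of those metrics, using only the average_precision_score and accuracy_score imports from the top of the file (the helper name and the 0.5 threshold are assumptions, not the repository's exact code):

import numpy as np
from sklearn.metrics import average_precision_score, accuracy_score

def summarize(y_true, y_pred, thres=0.5):
    # AP over all images, plus real/fake/overall accuracy at a fixed threshold.
    ap = average_precision_score(y_true, y_pred)
    r_acc = accuracy_score(y_true[y_true == 0], y_pred[y_true == 0] > thres)  # real images (label 0)
    f_acc = accuracy_score(y_true[y_true == 1], y_pred[y_true == 1] > thres)  # fake images (label 1)
    acc = accuracy_score(y_true, y_pred > thres)
    return ap, r_acc, f_acc, acc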
@@ -194,7 +196,6 @@ def __init__(self, real_path,

        stat_from = "imagenet" if arch.lower().startswith("imagenet") else "clip"
        self.transform = transforms.Compose([
-            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize( mean=MEAN[stat_from], std=STD[stat_from] ),
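
The only change in this hunk is the removal of transforms.Resize(256): images are now center-cropped to 224x224 at their native resolution rather than being resized to 256 first. A minimal sketch of the resulting preprocessing pipeline, with illustrative CLIP normalization statistics standing in for the repository's MEAN/STD dictionaries:

from torchvision import transforms

# Illustrative values; the repository defines MEAN/STD per backbone ('clip' / 'imagenet').
MEAN = {'clip': (0.48145466, 0.4578275, 0.40821073)}
STD = {'clip': (0.26862954, 0.26130258, 0.27577711)}

stat_from = 'clip'
transform = transforms.Compose([
    transforms.CenterCrop(224),   # crop directly, no Resize(256) beforehand
    transforms.ToTensor(),
    transforms.Normalize(mean=MEAN[stat_from], std=STD[stat_from]),
])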
@@ -230,7 +231,9 @@ def __len__(self):
        return len(self.total_list)

    def __getitem__(self, idx):
+
        img_path = self.total_list[idx]
+
        label = self.labels_dict[img_path]
        img = Image.open(img_path).convert("RGB")

@@ -253,13 +256,13 @@ def __getitem__(self, idx):
parser.add_argument('--real_path', type=str, default=None, help='dir name or a pickle')
parser.add_argument('--fake_path', type=str, default=None, help='dir name or a pickle')
parser.add_argument('--data_mode', type=str, default=None, help='wang2020 or ours')
-parser.add_argument('--max_sample', type=int, default=10000, help='only check this number of images for both fake/real')
+parser.add_argument('--max_sample', type=int, default=1000, help='only check this number of images for both fake/real')

parser.add_argument('--arch', type=str, default='res50')
parser.add_argument('--ckpt', type=str, default='./pretrained_weights/fc_weights.pth')

parser.add_argument('--result_folder', type=str, default='result', help='')
-parser.add_argument('--batch_size', type=int, default=64)
+parser.add_argument('--batch_size', type=int, default=128)

parser.add_argument('--jpeg_quality', type=int, default=None, help="100, 90, 80, ... 30. Used to test robustness of our model. Not apply if None")
parser.add_argument('--gaussian_sigma', type=int, default=None, help="0,1,2,3,4. Used to test robustness of our model. Not apply if None")
@@ -286,7 +289,7 @@ def __getitem__(self, idx):



-for dataset_path in tqdm(dataset_paths):
+for dataset_path in (dataset_paths):
    set_seed()

    dataset = RealFakeDataset( dataset_path['real_path'],
@@ -302,8 +305,8 @@ def __getitem__(self, idx):
    ap, r_acc0, f_acc0, acc0, r_acc1, f_acc1, acc1, best_thres = validate(model, loader, find_thres=True)

    with open( os.path.join(opt.result_folder,'ap.txt'), 'a') as f:
-        f.write( str(round(ap*100, 2))+'\n' )
+        f.write(dataset_path['key']+': ' + str(round(ap*100, 2))+'\n' )

    with open( os.path.join(opt.result_folder,'acc0.txt'), 'a') as f:
-        f.write( str(round(r_acc0*100, 2))+' '+str(round(f_acc0*100, 2))+' '+str(round(acc0*100, 2))+'\n' )
+        f.write(dataset_path['key']+': ' + str(round(r_acc0*100, 2))+' '+str(round(f_acc0*100, 2))+' '+str(round(acc0*100, 2))+'\n' )
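
With the dataset key prepended to every line, ap.txt and acc0.txt become self-describing rather than depending on row order. A small sketch of how ap.txt could be read back into a dict; the '<key>: <value>' format is taken from the write calls above, while the helper itself is not part of the repository:

def read_ap_file(path):
    # Parse lines of the form '<key>: <ap>' written by validate.py.
    scores = {}
    with open(path) as f:
        for line in f:
            if ':' not in line:
                continue
            key, value = line.strip().split(':', 1)
            scores[key] = float(value)
    return scores

# Example, using the result folder from test.sh:
# read_ap_file('clip_vitl14/ap.txt').get('progan')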
