We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Hi,
I found incorrect code for validating the average accuracy in your rafdb.py (see #231 and #239).
Mathematically, your calculation is not correct. I wrote a new evaluation script, and your average accuracy is only 83.76%.
#!/usr/bin/env python
# coding: utf-8
"""Evaluate a trained DAN model on the RAF-DB test split.

Loads a checkpoint, runs inference over the aligned test images, and
reports overall accuracy and balanced (per-class mean) accuracy.
"""

import os

import numpy as np
import pandas as pd
import torch
import torch.utils.data as data
from PIL import Image
from torchvision import transforms

from networks.dan import DAN

# Prefer the second GPU when available; fall back to CPU otherwise.
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")

model = DAN(num_head=4, num_class=7, pretrained=False)
checkpoint = torch.load('rafdb_epoch21_acc0.897_bacc0.8532.pth', map_location=device)
model.load_state_dict(checkpoint['model_state_dict'], strict=True)
model.to(device)
model.eval()


class RafDataSet(data.Dataset):
    """RAF-DB dataset of aligned face crops.

    Args:
        raf_path: Root directory containing ``EmoLabel/`` and ``Image/aligned/``.
        phase: ``'train'`` or ``'test'`` — selects rows whose name starts
            with that prefix in the label list.
        transform: Optional torchvision transform applied to each image.
    """

    def __init__(self, raf_path, phase, transform=None):
        self.phase = phase
        self.transform = transform
        self.raf_path = raf_path

        df = pd.read_csv(os.path.join(self.raf_path, 'EmoLabel/list_patition_label.txt'),
                         sep=' ', header=None, names=['name', 'label'])
        if phase == 'train':
            self.data = df[df['name'].str.startswith('train')]
        else:
            self.data = df[df['name'].str.startswith('test')]

        file_names = self.data.loc[:, 'name'].values
        # Labels in the list file are 1-based; shift to 0-based.
        # 0:Surprise, 1:Fear, 2:Disgust, 3:Happiness, 4:Sadness, 5:Anger, 6:Neutral
        self.label = self.data.loc[:, 'label'].values - 1

        _, self.sample_counts = np.unique(self.label, return_counts=True)
        # print(f' distribution of {phase} samples: {self.sample_counts}')

        self.file_paths = []
        for f in file_names:
            # FIX: use os.path.splitext instead of split(".")[0] so names
            # containing extra dots are not truncated.
            base, _ = os.path.splitext(f)
            path = os.path.join(self.raf_path, 'Image/aligned', base + "_aligned.jpg")
            self.file_paths.append(path)

    def __len__(self):
        return len(self.file_paths)

    def __getitem__(self, idx):
        path = self.file_paths[idx]
        image = Image.open(path).convert('RGB')
        label = self.label[idx]
        if self.transform is not None:
            image = self.transform(image)
        return image, label


data_transforms_val = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225])])

val_dataset = RafDataSet('datasets/', phase='test', transform=data_transforms_val)

val_loader = torch.utils.data.DataLoader(val_dataset,
                                         batch_size=64,
                                         num_workers=1,
                                         shuffle=False,
                                         pin_memory=True)

y_true = []
y_pred = []
with torch.no_grad():
    model.eval()
    for imgs, targets in val_loader:
        imgs = imgs.to(device)
        targets = targets.to(device)
        out, feat, heads = model(imgs)
        _, predicts = torch.max(out, 1)
        # BUG FIX: the original appended model predictions to y_true and
        # ground-truth targets to y_pred (swapped). accuracy_score is
        # symmetric so plain accuracy was unaffected, but
        # balanced_accuracy_score is NOT symmetric — it averages recall
        # over the *true* classes — so the argument order matters.
        y_true.append(targets.cpu().numpy())
        y_pred.append(predicts.cpu().numpy())

y_true = np.concatenate(y_true)
y_pred = np.concatenate(y_pred)

from sklearn.metrics import confusion_matrix, accuracy_score, ConfusionMatrixDisplay, balanced_accuracy_score

print('Acc', accuracy_score(y_true, y_pred))
print('Mean Acc', balanced_accuracy_score(y_true, y_pred))
Acc 0.8970013037809648 Mean Acc 0.8376120557760152
The text was updated successfully, but these errors were encountered:
Thanks for your reminder, I will check it~
Sorry, something went wrong.
It's my mistake; the result will be updated. Thanks again!
No branches or pull requests
Hi,
I found incorrect code for validating the average accuracy in your rafdb.py (see #231 and #239).
Mathematically, your calculation is not correct. I wrote a new evaluation script, and your average accuracy is only 83.76%.
Acc 0.8970013037809648
Mean Acc 0.8376120557760152
The text was updated successfully, but these errors were encountered: