13. Benchmarking pyfeat Emotion detection algorithms using data#

written by Tiankang Xie

In this tutorial we will demonstrate how to evaluate py-feat emotion detection algorithms with evaluation data. The evaluation data comes from a subset of AffectNet. Please see the CSV file inside this tutorial folder for more details.

import sys
import torch
import torch.nn as nn
import math
from feat.utils import set_torch_device
import torch.nn.functional as F
from copy import deepcopy
import numpy as np
from skimage import draw
import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms
from feat.utils.image_operations import extract_face_from_landmarks

from PIL import Image
from itertools import product
import os 
from torchvision.transforms import Compose, Normalize, Grayscale

import pandas as pd
from tqdm import tqdm
from feat import Detector

from joblib import delayed, Parallel
from torchvision.utils import save_image
from torchvision.io import read_image, read_video
from torch.utils.data import Dataset
from feat.transforms import Rescale
import glob
from skimage.feature import hog
import pickle
import joblib
from sklearn.metrics import f1_score
import xgboost as xgb
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import ToTensor
import matplotlib.pyplot as plt
from sklearn import preprocessing

from tqdm import tqdm

Provide the paths for:

  1. downloaded AffectNet dataset

  2. where to save the results

  3. the path to affectnet_testSubset, which can be found inside /py-feat/docs/extra_tutorials/

You can request access to AffectNet at http://mohammadmahoor.com/affectnet/

# Paths to the locally downloaded AffectNet images and the output directory.
# NOTE(review): these are machine-specific absolute paths — adjust before running.
data_dir = '/Storage/Data/AffectNet/Manual_Annot/Manually_Annotated_Images/'
save_result_dir = '/Storage/Projects/pyfeat_testing/Data_Eshin/emo_test/'
# Test-subset metadata: one row per image, including the ground-truth `expression` label.
test_file_csv = pd.read_csv('/Storage/Projects/pyfeat_testing/Data_Eshin/emo_test/affectnet_testSubset.csv', index_col=0)
# Full paths of all test images (data_dir already ends with '/').
inp_fnames = [data_dir + fp for fp in test_file_csv.subDirectory_filePath]
# Basename without extension; used later to merge predictions with labels.
test_file_csv['filename'] = test_file_csv.subDirectory_filePath.apply(lambda x: os.path.basename(x).split('.')[0])

1. Test ResMaskNet#

# Build a py-feat detector that uses ResMaskNet for emotion classification (GPU).
detector = Detector(face_model='retinaface',emotion_model='resmasknet', landmark_model="mobilefacenet", au_model='xgb', device='cuda')
/home/tiankang/anaconda3/envs/py38/lib/python3.8/site-packages/torchvision/models/_utils.py:135: UserWarning: Using 'backbone_name' as positional parameter(s) is deprecated since 0.13 and may be removed in the future. Please use keyword parameter(s) instead.
  warnings.warn(
/home/tiankang/anaconda3/envs/py38/lib/python3.8/site-packages/torchvision/models/_utils.py:223: UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future. The current behavior is equivalent to passing `weights=ResNet18_Weights.IMAGENET1K_V1`. You can also use `weights=ResNet18_Weights.DEFAULT` to get the most up-to-date weights.
  warnings.warn(msg)
# Run the ResMaskNet detector over every test image, keeping only the seven
# emotion probability columns, and save the combined predictions to CSV.
predicted_data = []
bad_file = 0  # number of images the detector failed on (kept best-effort)
for inp_name in tqdm(inp_fnames):
    try:
        img_df = detector.detect_image(input_file_list=inp_name, output_size=None, batch_size=1, num_workers=1)
        # .copy() so the added 'filename' column writes to a real frame, not a
        # view of img_df (avoids SettingWithCopyWarning / silently lost writes).
        emo_df = img_df[['anger','disgust','fear','happiness','sadness','surprise','neutral']].copy()
        emo_df['filename'] = os.path.basename(inp_name).split('.')[0]
        predicted_data.append(emo_df)
    except Exception:
        # Narrowed from a bare `except:` so Ctrl-C / SystemExit still propagate;
        # unreadable or face-less images are skipped but counted.
        bad_file += 1
predicted_data = pd.concat(predicted_data)
predicted_data.to_csv(save_result_dir+'resmasknet_bench_result.csv')
def test_emo_resmasknet():
    """Score the saved ResMaskNet predictions against AffectNet labels.

    Reads `resmasknet_bench_result.csv` from `save_result_dir`, merges it
    with the global `test_file_csv` on `filename`, and computes a per-emotion
    F1 score by binarizing the ground-truth `expression` column and rounding
    the predicted probabilities.

    Returns
    -------
    emo_result : list of float
        F1 score for each category, in `emo_categories` order.
    arrangement_df : pd.DataFrame
        Binarized labels (`*_label`) next to rounded predictions (`*_pred`).
    """
    emo_categories = ['anger', "disgust", "fear", "happiness", "sadness", "surprise", "neutral"]
    a1 = pd.read_csv(save_result_dir+'resmasknet_bench_result.csv', index_col=0)
    a2 = pd.merge(a1, test_file_csv, on=['filename'])

    # One-hot encode labels; assumes LabelBinarizer's sorted class order
    # matches emo_categories — TODO confirm against the `expression` coding.
    lb = preprocessing.LabelBinarizer()
    emo_labels_bi = lb.fit_transform(a2.expression)
    emo_preds = np.round(a2[emo_categories])

    emo_result = []
    for i, category in enumerate(emo_categories):
        # Compute each F1 once (was computed twice: once to print, once to store).
        score = f1_score(emo_labels_bi[:, i], emo_preds.iloc[:, i])
        print('========')
        print(category, ' f1 score: ', score)
        emo_result.append(score)

    arrangement_df_p1 = pd.DataFrame(emo_labels_bi, columns=[c + '_label' for c in emo_categories])
    arrangement_df_p2 = emo_preds.rename(columns={c: c + '_pred' for c in emo_categories})
    # pandas 2.0 removed the positional `axis` argument — must be a keyword.
    arrangement_df = pd.concat((arrangement_df_p1, arrangement_df_p2), axis=1)

    return emo_result, arrangement_df

Calculating metrics in F1 scores#

# Per-emotion F1 scores and the label/prediction table for ResMaskNet.
rmn_result, rmn_individuals = test_emo_resmasknet()

SVM model#

# Rebuild the detector with the SVM emotion model and benchmark it the same way.
detector = Detector(face_model='retinaface',emotion_model='svm', landmark_model="mobilefacenet", au_model='xgb', device='cuda')
predicted_data = []
bad_file = 0  # count failed images, mirroring the ResMaskNet loop
for inp_name in tqdm(inp_fnames):
    try:
        eye_df = detector.detect_image(input_file_list=inp_name, output_size=None, batch_size=1, num_workers=1)
        # .copy() so adding 'filename' doesn't write into a slice view of eye_df.
        eye_new_df = eye_df[['anger','disgust','fear','happiness','sadness','surprise','neutral']].copy()
        eye_new_df['filename'] = os.path.basename(inp_name).split('.')[0]
        predicted_data.append(eye_new_df)
    except Exception:
        # Narrowed from a bare `except:`; skip bad images but keep a count.
        bad_file += 1

predicted_data = pd.concat(predicted_data)
predicted_data.to_csv(save_result_dir+'svm_bench_result.csv')
def test_svm_resmasknet():
    """Score the saved SVM-model predictions against AffectNet labels.

    Reads `svm_bench_result.csv` from `save_result_dir`, merges it with the
    global `test_file_csv` on `filename`, and computes per-emotion F1 scores
    from binarized ground-truth labels and rounded predicted probabilities.

    Returns
    -------
    emo_result : list of float
        F1 score for each category, in `emo_categories` order.
    arrangement_df : pd.DataFrame
        Binarized labels (`*_label`) next to rounded predictions (`*_pred`).
    """
    a1 = pd.read_csv(save_result_dir+'svm_bench_result.csv', index_col=0)
    a2 = pd.merge(a1, test_file_csv, on=['filename'])

    emo_categories = ['anger', "disgust", "fear", "happiness", "sadness", "surprise", "neutral"]
    # One-hot encode labels; assumes LabelBinarizer's sorted class order
    # matches emo_categories — TODO confirm against the `expression` coding.
    lb = preprocessing.LabelBinarizer()
    emo_labels_bi = lb.fit_transform(a2.expression)
    emo_preds = np.round(a2[emo_categories])

    emo_result = []
    for i, category in enumerate(emo_categories):
        # Compute each F1 once (was computed twice: once to print, once to store).
        score = f1_score(emo_labels_bi[:, i], emo_preds.iloc[:, i])
        print('========')
        print(category, ' f1 score: ', score)
        emo_result.append(score)

    arrangement_df_p1 = pd.DataFrame(emo_labels_bi, columns=[c + '_label' for c in emo_categories])
    arrangement_df_p2 = emo_preds.rename(columns={c: c + '_pred' for c in emo_categories})
    # pandas 2.0 removed the positional `axis` argument — must be a keyword.
    arrangement_df = pd.concat((arrangement_df_p1, arrangement_df_p2), axis=1)

    return emo_result, arrangement_df

Calculating metrics in F1 scores#

# Per-emotion F1 scores and the label/prediction table for the SVM model.
svm_result, svm_individuals = test_svm_resmasknet()