From 74a5c91252120ff8772ad066a53dbb555455c2c5 Mon Sep 17 00:00:00 2001
From: hanezu
Date: Fri, 14 Sep 2018 18:27:59 +0900
Subject: [PATCH] fix README, test.py minor typos

---
 README.md | 2 +-
 test.py   | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index f1a231377..ce9bab0f6 100644
--- a/README.md
+++ b/README.md
@@ -15,7 +15,7 @@ This code was made public to share our research for the benefit of the scientifi
 ## Data Preparation
 The code requires a directory containing the following files:
 - `imgs/`: folder with all image
-- `aus_openpose.pkl`: dictionary containing the images action units.
+- `aus_openface.pkl`: dictionary containing the images action units.
 - `train_ids.csv`: file containing the images names to be used to train.
 - `test_ids.csv`: file containing the images names to be used to test.

diff --git a/test.py b/test.py
index 9ec0c9b9d..8f060938a 100644
--- a/test.py
+++ b/test.py
@@ -43,10 +43,10 @@ def _img_morph(self, img, expresion):

         return morphed_face

-    def _morph_face(self, face, expresion):
+    def _morph_face(self, face, expression):
         face = torch.unsqueeze(self._transform(Image.fromarray(face)), 0)
-        expresion = torch.unsqueeze(torch.from_numpy(expresion/5.0), 0)
-        test_batch = {'real_img': face, 'real_cond': expresion, 'desired_cond': expresion, 'sample_id': torch.FloatTensor(), 'real_img_path': []}
+        expression = torch.unsqueeze(torch.from_numpy(expression/5.0), 0)
+        test_batch = {'real_img': face, 'real_cond': expression, 'desired_cond': expression, 'sample_id': torch.FloatTensor(), 'real_img_path': []}
         self._model.set_input(test_batch)
         imgs, _ = self._model.forward(keep_data_for_visuals=False, return_estimates=True)
         return imgs['concat']