Commit 745414c (1 parent: ba3f80d). Showing 3 changed files with 285 additions and 0 deletions.
@@ -0,0 +1,143 @@
import os
import uuid
import glob
import json
import collections

import numpy as np

from chm.configs.args_file import parse_arguments
from chm.utils.experiment_result_keys import *

"""
Script to extract error stats from the awareness estimation experiments. If the file to be parsed is named
experiment_type_awareness_estimates_EXTRA_INFO.json, the script is to be used as follows
Usage:
python parse_awareness_estimation_data.py --results_json_prefix experiment_type_awareness_estimates_ (--display_balanced_results)
"""


def arg_setter(parser):
    parser.add_argument(
        "--results_json_prefix",
        type=str,
        default="",
        help="Prefix for data json files from awareness estimation experiments",
    )
    parser.add_argument(
        "--display_balanced_results",
        action="store_true",
        default=False,
        help="Flag for displaying target class balanced results",
    )


if __name__ == "__main__":
    session_hash = uuid.uuid4().hex
    args = parse_arguments(session_hash, [arg_setter])

    display_balanced_results = args.display_balanced_results
    # extract all the data files with the specified prefix and sort them
    filenames = glob.glob(os.path.expanduser(args.results_json_prefix) + "*.json")
    filenames = sorted(filenames)

    for filename in filenames:
        print(" ")
        print(filename)
        with open(filename, "r") as fp:
            jsn = json.load(fp)

        results_sorted_according_to_labels = collections.defaultdict(list)  # (not used further in this script)
        # gather all possible targets for target class balanced results
        targets = jsn[AWARENESS_TARGET_KEY]
        target_histogram = collections.Counter(targets)
        sample_weights = [1.0 / target_histogram[t] for t in targets]

        # extract errors and estimates from the jsons
        sq_error_chm = jsn[AWARENESS_ERROR_CHM_KEY]
        sq_error_of_spatiotemporal_gaussian = jsn[AWARENESS_ERROR_OF_SPATIOTEMPORAL_GAUSSIAN_KEY]

        abs_error_chm = jsn[AWARENESS_ABS_ERROR_CHM_KEY]
        abs_error_of_spatiotemporal_gaussian = jsn[AWARENESS_ABS_ERROR_OF_SPATIOTEMPORAL_GAUSSIAN_KEY]

        awareness_estimate_chm = jsn[AWARENESS_ESTIMATE_CHM_KEY]
        awareness_estimate_of_spatiotemporal_gaussian = jsn[AWARENESS_ESTIMATE_OF_SPATIOTEMPORAL_GAUSSIAN_KEY]

        # sanity check to make sure all metrics are logged properly
        assert (
            len(targets)
            == len(sq_error_chm)
            == len(sq_error_of_spatiotemporal_gaussian)
            == len(abs_error_chm)
            == len(abs_error_of_spatiotemporal_gaussian)
            == len(awareness_estimate_chm)
            == len(awareness_estimate_of_spatiotemporal_gaussian)
        )

        if display_balanced_results:
            # weighted abs and sq errors
            abs_error_chm = np.array(abs_error_chm) * np.array(sample_weights)
            abs_error_of_spatiotemporal_gaussian = np.array(abs_error_of_spatiotemporal_gaussian) * np.array(
                sample_weights
            )
            sq_error_chm = np.array(sq_error_chm) * np.array(sample_weights)
            sq_error_of_spatiotemporal_gaussian = np.array(sq_error_of_spatiotemporal_gaussian) * np.array(
                sample_weights
            )

            # absolute weighted error mean and std
            std_abs_chm = np.std(abs_error_chm)
            std_abs_of_spatiotemporal_gaussian = np.std(abs_error_of_spatiotemporal_gaussian)
            mean_abs_chm = np.sum(abs_error_chm) / np.sum(sample_weights)
            mean_abs_of_spatiotemporal_gaussian = np.sum(abs_error_of_spatiotemporal_gaussian) / np.sum(sample_weights)

            # squared error mean and std
            std_sq_chm = np.std(sq_error_chm)
            std_sq_of_spatiotemporal_gaussian = np.std(sq_error_of_spatiotemporal_gaussian)
            mean_sq_chm = np.sum(sq_error_chm) / np.sum(sample_weights)
            mean_sq_of_spatiotemporal_gaussian = np.sum(sq_error_of_spatiotemporal_gaussian) / np.sum(sample_weights)
        else:
            # absolute error mean and std
            std_abs_chm = np.std(abs_error_chm)
            std_abs_of_spatiotemporal_gaussian = np.std(abs_error_of_spatiotemporal_gaussian)
            mean_abs_chm = np.average(abs_error_chm)
            mean_abs_of_spatiotemporal_gaussian = np.average(abs_error_of_spatiotemporal_gaussian)
            # squared error mean and std
            std_sq_chm = np.std(sq_error_chm)
            std_sq_of_spatiotemporal_gaussian = np.std(sq_error_of_spatiotemporal_gaussian)
            mean_sq_chm = np.average(sq_error_chm)
            mean_sq_of_spatiotemporal_gaussian = np.average(sq_error_of_spatiotemporal_gaussian)

        # print the means and std deviations for chm and baseline estimate to screen
        print(
            "ABS ERROR CHM: {} +/- {}. SpatioTemporal OF Gaussian: {} +/- {}".format(
                mean_abs_chm,
                std_abs_chm,
                mean_abs_of_spatiotemporal_gaussian,
                std_abs_of_spatiotemporal_gaussian,
            )
        )

        print(
            "SQUARED ERROR CHM: {} +/- {}. SpatioTemporal OF Gaussian: {} +/- {}".format(
                mean_sq_chm,
                std_sq_chm,
                mean_sq_of_spatiotemporal_gaussian,
                std_sq_of_spatiotemporal_gaussian,
            )
        )

        # mean and std deviation of the awareness estimates
        std_awareness_estimate_chm = np.std(awareness_estimate_chm)
        std_awareness_estimate_of_spatiotemporal_gaussian = np.std(awareness_estimate_of_spatiotemporal_gaussian)
        mean_awareness_estimate_chm = np.average(awareness_estimate_chm)
        mean_awareness_estimate_of_spatiotemporal_gaussian = np.average(awareness_estimate_of_spatiotemporal_gaussian)

        # print the awareness estimate from CHM and baseline estimate to screen
        print(
            "AWARENESS ESTIMATE CHM: {} +/- {}. SpatioTemporal OF Gaussian: {} +/- {}".format(
                mean_awareness_estimate_chm,
                std_awareness_estimate_chm,
                mean_awareness_estimate_of_spatiotemporal_gaussian,
                std_awareness_estimate_of_spatiotemporal_gaussian,
            )
        )
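Not part of the commit: a minimal standalone sketch of the inverse-frequency weighting used for the class-balanced results above. The numbers are made up purely for illustration.

import collections

import numpy as np

# Hypothetical per-sample targets and absolute errors, for illustration only.
targets = [0, 0, 0, 1]  # class 0 is three times as frequent as class 1
abs_errors = np.array([0.1, 0.1, 0.1, 0.7])

# Inverse-frequency weights, as in the script: each sample is weighted by 1 / count(its class).
histogram = collections.Counter(targets)
weights = np.array([1.0 / histogram[t] for t in targets])

unbalanced_mean = abs_errors.mean()  # 0.25, dominated by the frequent class
balanced_mean = (abs_errors * weights).sum() / weights.sum()  # 0.4, each class contributes equally
print(unbalanced_mean, balanced_mean)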
@@ -0,0 +1,71 @@
import os
import uuid
import json

import numpy as np

from chm.configs.args_file import parse_arguments
from chm.utils.experiment_result_keys import *


def arg_setter(parser):
    parser.add_argument(
        "--folder_containing_results",
        type=str,
        default="",
        help="Path to folder containing calibration optimization results",
    )

    parser.add_argument(
        "--num_optimization_runs",
        type=int,
        default=5,
        help="Number of optimization runs performed for a given noise level for the calibration experiment",
    )

    parser.add_argument(
        "--noise_levels",
        action="store",
        nargs="*",
        type=float,
        default=[0.1, 0.2, 0.3, 0.5],
        help="Noise levels used in the calibration experiments",
    )

    parser.add_argument(
        "--filename_append", type=str, default="", help="Additional descriptive string for filename string components"
    )


if __name__ == "__main__":
    session_hash = uuid.uuid4().hex
    args = parse_arguments(session_hash, [arg_setter])
    noise_levels = args.noise_levels
    num_optimization_runs = args.num_optimization_runs
    filename_append = args.filename_append
    file_prefix = "experiment_type_gaze_calibration_miscalibration_noise_level_"
    for noise_level in noise_levels:
        starting_mse_error_list = []
        end_mse_error_list = []
        for i in range(num_optimization_runs):
            jsnfile_name = (
                file_prefix + str(noise_level) + "_optimization_run_num_" + str(i) + filename_append + ".json"
            )
            print("Loading : ", jsnfile_name)
            jsn_full_path = os.path.join(args.folder_containing_results, jsnfile_name)
            with open(jsn_full_path, "r") as fp:
                res = json.load(fp)

            starting_mse_error_list.append(res["errors"][0]["error"])
            # assumes that the calibration optimization successfully converged
            end_mse_error_list.append(res["errors"][-1]["error"])

        print(
            "Mean and std for starting error for noise level {} is {} and {}".format(
                noise_level, np.mean(starting_mse_error_list), np.std(starting_mse_error_list)
            )
        )
        print(
            "Mean and std for ending error for noise level {} is {} and {}".format(
                noise_level, np.mean(end_mse_error_list), np.std(end_mse_error_list)
            )
        )
        print(" ")
@@ -0,0 +1,71 @@
import os
import uuid
import glob
import json

import numpy as np

from chm.configs.args_file import parse_arguments
from chm.utils.experiment_result_keys import *

"""
Script to extract error stats from gaze denoising experiments. If the file to be parsed is named
experiment_type_gaze_denoising_EXTRA_INFO.json, the script is to be used as follows
Usage:
python parse_denoising_data.py --results_json_prefix experiment_type_gaze_denoising_
"""


def arg_setter(parser):
    parser.add_argument(
        "--results_json_prefix",
        type=str,
        default="",
        help="Prefix for data json files from gaze denoising experiments",
    )


if __name__ == "__main__":
    session_hash = uuid.uuid4().hex
    args = parse_arguments(session_hash, [arg_setter])
    # extract all the data files with the specified prefix and sort them
    filenames = glob.glob(os.path.expanduser(args.results_json_prefix) + "*.json")
    filenames = sorted(filenames)
    for filename in filenames:
        print(" ")
        print(filename)
        with open(filename, "r") as fp:
            jsn = json.load(fp)

        # grab errors computed during experiment.
        error_noisy = jsn[GAZE_ERROR_NOISY_KEY]
        error_objectbased = jsn[GAZE_ERROR_OBJ_KEY]
        error_chm = jsn[GAZE_ERROR_CHM_KEY_WITH_GAZE]
        error_saliency = jsn[GAZE_ERROR_CHM_KEY_WITHOUT_GAZE]

        # grab the mean sqrt errors.
        sqrt_error_noisy = jsn["overall_" + GAZE_ERROR_NOISY_KEY + "_sqrt_mean"]
        sqrt_error_objectbased = jsn["overall_" + GAZE_ERROR_OBJ_KEY + "_sqrt_mean"]
        sqrt_error_chm = jsn["overall_" + GAZE_ERROR_CHM_KEY_WITH_GAZE + "_sqrt_mean"]
        sqrt_error_saliency = jsn["overall_" + GAZE_ERROR_CHM_KEY_WITHOUT_GAZE + "_sqrt_mean"]

        # compute mean and std of errors
        mean_noisy = np.average(error_noisy)
        mean_object = np.average(error_objectbased)
        mean_chm = np.average(error_chm)
        mean_saliency = np.average(error_saliency)
        std_noisy = np.std(error_noisy)
        std_object = np.std(error_objectbased)
        std_chm = np.std(error_chm)
        std_saliency = np.std(error_saliency)
        # print the mean and std errors for different denoising approaches. "Noisy" refers to without denoising
        print(
            "CHM: {} +/- {}. Saliency: {} +/- {}. Object-based: {} +/- {}. Noisy: {} +/- {}".format(
                mean_chm, std_chm, mean_saliency, std_saliency, mean_object, std_object, mean_noisy, std_noisy
            )
        )
        print(
            "SQRT MEAN CHM: {}. Saliency: {}. Object-based: {}. Noisy: {}".format(
                sqrt_error_chm, sqrt_error_saliency, sqrt_error_objectbased, sqrt_error_noisy
            )
        )
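Not part of the commit: the overall_<key>_sqrt_mean lookups above build dictionary keys from imported constants. A toy sketch of that key composition, using an assumed key string (the real constants live in chm.utils.experiment_result_keys and may differ):

# Assumed key string for illustration only; the real constant is defined in
# chm.utils.experiment_result_keys and may have a different value.
GAZE_ERROR_NOISY_KEY = "gaze_error_noisy"

# Hypothetical result dict: a per-sample error list plus a precomputed summary scalar (values made up).
jsn = {
    GAZE_ERROR_NOISY_KEY: [0.12, 0.08, 0.10],
    "overall_" + GAZE_ERROR_NOISY_KEY + "_sqrt_mean": 0.3,
}

error_noisy = jsn[GAZE_ERROR_NOISY_KEY]
sqrt_error_noisy = jsn["overall_" + GAZE_ERROR_NOISY_KEY + "_sqrt_mean"]
print(error_noisy, sqrt_error_noisy)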