diff --git a/.gitignore b/.gitignore
index 1c9f2d562..8703427be 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,8 @@ build-*
 *.so
 *.a
 *.so.*
+.idea/
+.vscode/
+cmake-build-*/
+result.txt
+*.pyc
\ No newline at end of file
diff --git a/README.md b/README.md
index 0a38231fa..c140db18a 100644
--- a/README.md
+++ b/README.md
@@ -241,6 +241,10 @@ using the TUM RGB-D / TUM monoVO format ([timestamp x y z qx qy qz qw] of the ca
 
+#### 3.6 Evaluation
+For the [TUM-VI dataset](https://vision.in.tum.de/data/datasets/visual-inertial-dataset) an evaluation script is provided at `evaluation/tum-vi_run_all.py`. It runs several sequences multiple times each and reports the median RMS ATE. Run `tum-vi_run_all.py -h` for usage instructions.
+
+
 ### 4 General Notes for Good Results
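For illustration, a typical invocation of the new script (the dataset path below is a placeholder, not part of the patch):

    python3 evaluation/tum-vi_run_all.py --dataset_path /path/to/tum-vi --resolution 512_16 --runs_per_sequence 3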
diff --git a/evaluation/associate.py b/evaluation/associate.py
new file mode 100644
index 000000000..6e770e907
--- /dev/null
+++ b/evaluation/associate.py
@@ -0,0 +1,130 @@
+#!/usr/bin/python2
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2013, Juergen Sturm, TUM
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#  * Neither the name of TUM nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Requirements:
+# sudo apt-get install python-argparse
+
+"""
+The Kinect provides the color and depth images in an un-synchronized way. This means that the set of time stamps from the color images does not intersect with those of the depth images. Therefore, we need some way of associating color images to depth images.
+
+For this purpose, you can use the ''associate.py'' script. It reads the time stamps from the rgb.txt file and the depth.txt file, and joins them by finding the best matches.
+"""
+
+import argparse
+import sys
+import os
+import numpy
+
+
+def read_file_list(filename, remove_bounds):
+    """
+    Reads a trajectory from a text file.
+
+    File format:
+    The file format is "stamp d1 d2 d3 ...", where stamp denotes the time stamp (to be matched)
+    and "d1 d2 d3.." is arbitrary data (e.g., a 3D position and 3D orientation) associated to this timestamp.
+
+    Input:
+    filename -- File name
+
+    Output:
+    dict -- dictionary of (stamp,data) tuples
+
+    """
+    file = open(filename)
+    data = file.read()
+    lines = data.replace(","," ").replace("\t"," ").split("\n")
+    if remove_bounds:
+        lines = lines[100:-100]
+    list = [[v.strip() for v in line.split(" ") if v.strip()!=""] for line in lines if len(line)>0 and line[0]!="#"]
+    list = [(float(l[0]),l[1:]) for l in list if len(l)>1]
+    return dict(list)
+
+def associate(first_list, second_list, offset, max_difference):
+    """
+    Associate two dictionaries of (stamp,data). As the time stamps never match exactly, we aim
+    to find the closest match for every input tuple.
+
+    Input:
+    first_list -- first dictionary of (stamp,data) tuples
+    second_list -- second dictionary of (stamp,data) tuples
+    offset -- time offset between both dictionaries (e.g., to model the delay between the sensors)
+    max_difference -- search radius for candidate generation
+
+    Output:
+    matches -- list of matched tuples ((stamp1,data1),(stamp2,data2))
+
+    """
+    first_keys = first_list.keys()
+    second_keys = second_list.keys()
+    potential_matches = [(abs(a - (b + offset)), a, b)
+                         for a in first_keys
+                         for b in second_keys
+                         if abs(a - (b + offset)) < max_difference]
+    potential_matches.sort()
+    matches = []
+    for diff, a, b in potential_matches:
+        if a in first_keys and b in second_keys:
+            first_keys.remove(a)
+            second_keys.remove(b)
+            matches.append((a, b))
+
+    matches.sort()
+    return matches
+
+if __name__ == '__main__':
+
+    # parse command line
+    parser = argparse.ArgumentParser(description='''
+    This script takes two data files with timestamps and associates them
+    ''')
+    parser.add_argument('first_file', help='first text file (format: timestamp data)')
+    parser.add_argument('second_file', help='second text file (format: timestamp data)')
+    parser.add_argument('--first_only', help='only output associated lines from first file', action='store_true')
+    parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)', default=0.0)
+    parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)', default=0.02)
+    args = parser.parse_args()
+
+    first_list = read_file_list(args.first_file, False)
+    second_list = read_file_list(args.second_file, False)
+
+    matches = associate(first_list, second_list, float(args.offset), float(args.max_difference))
+
+    if args.first_only:
+        for a,b in matches:
+            print("%f %s"%(a," ".join(first_list[a])))
+    else:
+        for a,b in matches:
+            print("%f %s %f %s"%(a," ".join(first_list[a]),b-float(args.offset)," ".join(second_list[b])))
+
+
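For illustration, the greedy matcher in associate.py pairs the globally closest stamps first and uses each stamp at most once; a minimal python2 session (timestamps invented), run from the evaluation/ directory:

    import associate  # assumes evaluation/ is the working directory
    rgb   = {1.000: ['rgb/1.000.png'], 1.033: ['rgb/1.033.png'], 1.066: ['rgb/1.066.png']}
    depth = {1.001: ['depth/1.001.png'], 1.040: ['depth/1.040.png']}
    print(associate.associate(rgb, depth, 0.0, 0.02))
    # -> [(1.0, 1.001), (1.033, 1.04)]; 1.066 has no partner within 0.02 s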
diff --git a/evaluation/evaluate_ate_scale.py b/evaluation/evaluate_ate_scale.py
new file mode 100644
index 000000000..bf3ec55f6
--- /dev/null
+++ b/evaluation/evaluate_ate_scale.py
@@ -0,0 +1,246 @@
+#!/usr/bin/python2
+# Modified by Raul Mur-Artal and Martin Wudenka
+# Automatically compute the optimal scale factor for monocular VO/SLAM.
+
+# Software License Agreement (BSD License)
+#
+# Copyright (c) 2013, Juergen Sturm, TUM
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+#  * Redistributions of source code must retain the above copyright
+#    notice, this list of conditions and the following disclaimer.
+#  * Redistributions in binary form must reproduce the above
+#    copyright notice, this list of conditions and the following
+#    disclaimer in the documentation and/or other materials provided
+#    with the distribution.
+#  * Neither the name of TUM nor the names of its
+#    contributors may be used to endorse or promote products derived
+#    from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Requirements:
+# sudo apt-get install python-argparse
+
+"""
+This script computes the absolute trajectory error from the ground truth
+trajectory and the estimated trajectory.
+"""
+
+import sys
+import numpy
+import argparse
+import associate
+
+def align(model,data):
+    """Align two trajectories using the method of Horn (closed-form).
+
+    Input:
+    model -- first trajectory (3xn)
+    data -- second trajectory (3xn)
+
+    Output:
+    rot -- rotation matrix (3x3)
+    transGT -- translation vector with scale correction (3x1)
+    trans_errorGT -- translational error per point with scale correction (1xn)
+    trans -- translation vector (3x1)
+    trans_error -- translational error per point (1xn)
+    s -- optimal scale factor
+    """
+
+    numpy.set_printoptions(precision=3, suppress=True)
+    model_zerocentered = model - model.mean(1)
+    data_zerocentered = data - data.mean(1)
+
+    W = numpy.zeros((3,3))
+    for column in range(model.shape[1]):
+        W += numpy.outer(model_zerocentered[:,column], data_zerocentered[:,column])
+    U,d,Vh = numpy.linalg.svd(W.transpose())
+    S = numpy.matrix(numpy.identity(3))
+    if numpy.linalg.det(U) * numpy.linalg.det(Vh) < 0:
+        S[2,2] = -1
+    rot = U*S*Vh
+
+    rotmodel = rot*model_zerocentered
+    dots = 0.0
+    norms = 0.0
+
+    for column in range(data_zerocentered.shape[1]):
+        dots += numpy.dot(data_zerocentered[:,column].transpose(), rotmodel[:,column])
+        normi = numpy.linalg.norm(model_zerocentered[:,column])
+        norms += normi*normi
+
+    # optimal scale s such that data ~= s * rot * model + trans
+    s = float(dots/norms)
+
+    transGT = data.mean(1) - s*rot * model.mean(1)
+    trans = data.mean(1) - rot * model.mean(1)
+
+    model_alignedGT = s*rot * model + transGT
+    model_aligned = rot * model + trans
+
+    alignment_errorGT = model_alignedGT - data
+    alignment_error = model_aligned - data
+
+    trans_errorGT = numpy.sqrt(numpy.sum(numpy.multiply(alignment_errorGT,alignment_errorGT),0)).A[0]
+    trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error,alignment_error),0)).A[0]
+
+    return rot,transGT,trans_errorGT,trans,trans_error,s
+
+def plot_traj(ax,stamps,traj,style,color,label):
+    """
+    Plot a trajectory using matplotlib.
+
+    Input:
+    ax -- the plot
+    stamps -- time stamps (1xn)
+    traj -- trajectory (3xn)
+    style -- line style
+    color -- line color
+    label -- plot legend
+
+    """
+    stamps.sort()
+    interval = numpy.median([s-t for s,t in zip(stamps[1:],stamps[:-1])])
+
+    x = []
+    y = []
+    last = stamps[0]
+    for i in range(len(stamps)):
+        if stamps[i]-last < 20*interval:
+            x.append(traj[i][0])
+            y.append(traj[i][1])
+        elif len(x)>0:
+            ax.plot(x,y,style,color=color,label=label)
+            ax.scatter(traj[i][0],traj[i][1],s=1,color=color)
+            label=""
+            x=[]
+            y=[]
+        last = stamps[i]
+    if len(x)>0:
+        ax.plot(x,y,style,color=color,label=label)
+
+
+if __name__=="__main__":
+    # parse command line
+    parser = argparse.ArgumentParser(description='''
+    This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.
+    ''')
+    parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)')
+    parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)')
+    parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)', default=0.0)
+    parser.add_argument('--scale', help='scaling factor for the second trajectory (default: 1.0)', default=1.0)
+    parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 20000000 ns)', default=20000000)
+    parser.add_argument('--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)')
+    parser.add_argument('--save_associations', help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)')
+    parser.add_argument('--plot', help='plot the first and the aligned second trajectory to an image (format: svg)')
+    parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)', action='store_true')
+    parser.add_argument('--verbose2', help='print scale error and RMSE absolute translational error in meters after alignment with and without scale correction', action='store_true')
+    args = parser.parse_args()
+
+    first_list = associate.read_file_list(args.first_file, False)
+    second_list = associate.read_file_list(args.second_file, False)
+
+    matches = associate.associate(first_list, second_list, float(args.offset), float(args.max_difference))
+
+    if len(matches)<2:
+        sys.exit("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")
+
+    first_xyz = numpy.matrix([[float(value) for value in first_list[a][0:3]] for a,b in matches]).transpose()
+    second_xyz = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for a,b in matches]).transpose()
+
+    dictionary_items = second_list.items()
+    sorted_second_list = sorted(dictionary_items)
+    second_xyz_full = numpy.matrix([[float(value)*float(args.scale) for value in sorted_second_list[i][1][0:3]] for i in range(len(sorted_second_list))]).transpose()
+
+    rot,transGT,trans_errorGT,trans,trans_error,scale = align(second_xyz, first_xyz)
+
+    second_xyz_aligned = scale * rot * second_xyz + transGT
+    second_xyz_notscaled = rot * second_xyz + trans
+    second_xyz_notscaled_full = rot * second_xyz_full + trans
+
+    first_stamps = first_list.keys()
+    first_stamps.sort()
+    first_xyz_full = numpy.matrix([[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose()
+
+    second_stamps = second_list.keys()
+    second_stamps.sort()
+    second_xyz_full = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for b in second_stamps]).transpose()
+    second_xyz_full_aligned = scale * rot * second_xyz_full + transGT
+
+    if args.verbose:
+        print "compared_pose_pairs %d pairs"%(len(trans_error))
+
+        print "absolute_translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
+        print "absolute_translational_error.mean %f m"%numpy.mean(trans_error)
+        print "absolute_translational_error.median %f m"%numpy.median(trans_error)
+        print "absolute_translational_error.std %f m"%numpy.std(trans_error)
+        print "absolute_translational_error.min %f m"%numpy.min(trans_error)
+        print "absolute_translational_error.max %f m"%numpy.max(trans_error)
+        print "max idx: %i"%numpy.argmax(trans_error)
+    else:
+        # CSV output: RMS ATE without scale correction, optimal scale, RMS ATE with scale correction
+        print "%f,%f,%f" % (numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error)), scale, numpy.sqrt(numpy.dot(trans_errorGT,trans_errorGT) / len(trans_errorGT)))
+
+    if args.verbose2:
+        print "compared_pose_pairs %d pairs"%(len(trans_error))
+        print "absolute_translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
+        print "absolute_translational_errorGT.rmse %f m"%numpy.sqrt(numpy.dot(trans_errorGT,trans_errorGT) / len(trans_errorGT))
+
+    if args.save_associations:
+        file = open(args.save_associations,"w")
+        file.write("\n".join(["%f %f %f %f %f %f %f %f"%(a,x1,y1,z1,b,x2,y2,z2) for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A)]))
+        file.close()
+
+    if args.save:
+        file = open(args.save,"w")
+        file.write("\n".join(["%f "%stamp+" ".join(["%f"%d for d in line]) for stamp,line in zip(second_stamps,second_xyz_notscaled_full.transpose().A)]))
+        file.close()
+
+    if args.plot:
+        import matplotlib
+        matplotlib.use('Agg')
+        import matplotlib.pyplot as plt
+        import matplotlib.pylab as pylab
+        from matplotlib.patches import Ellipse
+        fig = plt.figure(figsize=(10,10))
+        ax = fig.add_subplot(111)
+        plot_traj(ax,first_stamps,first_xyz_full.transpose().A,'-',"black","ground truth")
+        plot_traj(ax,second_stamps,second_xyz_full_aligned.transpose().A,'-',"blue","estimated")
+        label="difference"
+        for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A):
+            ax.plot([x1,x2],[y1,y2],'-',color="red",linewidth=0.5,label=label)
+            label=""
+
+        ax.legend()
+
+        ax.set_xlabel('x [m]')
+        ax.set_ylabel('y [m]')
+
+        x_diff = first_xyz_full[0].max() - first_xyz_full[0].min()
+        x_center = first_xyz_full[0].min() + 0.5 * x_diff
+        y_diff = first_xyz_full[1].max() - first_xyz_full[1].min()
+        y_center = first_xyz_full[1].min() + 0.5 * y_diff
+
+        max_diff = numpy.max([x_diff, y_diff])
+
+        ax.set_xlim(x_center-0.55 * max_diff, x_center+0.55 * max_diff)
+        ax.set_ylim(y_center-0.55 * max_diff, y_center+0.55 * max_diff)
+        #plt.axis('equal')
+        plt.tight_layout()
+        plt.grid()
+        plt.savefig(args.plot,format="svg")
+
+
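For illustration, align() is Horn's closed-form absolute-orientation solution plus a least-squares scale factor (Umeyama-style). A standalone numpy sketch of the same arithmetic on invented, noise-free data, where the estimate is the ground truth rotated 90 degrees about z, scaled by 0.5 and translated (nothing here is part of the patch):

    import numpy

    gt  = numpy.matrix([[0., 1., 0., 0.],
                        [0., 0., 1., 0.],
                        [0., 0., 0., 1.]])          # ground truth, 3xn
    R90 = numpy.matrix([[0., -1., 0.],
                        [1.,  0., 0.],
                        [0.,  0., 1.]])
    est = 0.5 * R90 * gt + numpy.matrix([[1.], [2.], [3.]])

    model_zc = est - est.mean(1)                    # zero-centred estimate ("model")
    data_zc  = gt - gt.mean(1)                      # zero-centred ground truth ("data")
    U, d, Vh = numpy.linalg.svd((model_zc * data_zc.T).T)
    S = numpy.matrix(numpy.identity(3))
    if numpy.linalg.det(U) * numpy.linalg.det(Vh) < 0:
        S[2, 2] = -1                                # guard against reflections
    rot = U * S * Vh                                # rotation mapping est -> gt
    s = float(numpy.multiply(data_zc, rot * model_zc).sum() /
              numpy.multiply(model_zc, model_zc).sum())
    trans = gt.mean(1) - s * rot * est.mean(1)
    err = s * rot * est + trans - gt
    print(s)                                        # ~2.0, the inverse of 0.5
    print(numpy.sqrt(numpy.square(err).mean()))     # ~0, exact recovery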
diff --git a/evaluation/tum-vi_run_all.py b/evaluation/tum-vi_run_all.py
new file mode 100644
index 000000000..b502f4745
--- /dev/null
+++ b/evaluation/tum-vi_run_all.py
@@ -0,0 +1,160 @@
+#!/usr/bin/env python3
+
+import argparse
+import subprocess
+import csv
+import os.path
+from send2trash import send2trash
+import numpy as np
+
+sequences = [
+    "room1",
+    "room2",
+    "room3",
+    "room4",
+    "room5",
+    "room6"
+]
+
+
+def convert_gt(new_gt_file_name, sequence_path):
+    """
+    Convert the timestamps of a ground truth sequence from ns to s.
+    """
+
+    if os.path.isfile(new_gt_file_name):
+        return
+
+    csv_row_dicts = []
+
+    with open("%s/dso/gt_imu.csv" % sequence_path) as old_gt_file:
+        old_gt_reader = csv.DictReader(old_gt_file, delimiter=',')
+        for row in old_gt_reader:
+            csv_row_dicts.append({
+                "#timestamp [s]": float(row['# timestamp[ns]']) / 1e9,
+                "p_RS_R_x [m]": row['tx'],
+                "p_RS_R_y [m]": row['ty'],
+                "p_RS_R_z [m]": row['tz'],
+                "q_RS_x []": row['qx'],
+                "q_RS_y []": row['qy'],
+                "q_RS_z []": row['qz'],
+                "q_RS_w []": row['qw']
+            })
+
+    with open(new_gt_file_name, 'w') as new_gt_file:
+        fieldnames = ["#timestamp [s]", "p_RS_R_x [m]", "p_RS_R_y [m]",
+                      "p_RS_R_z [m]", "q_RS_x []", "q_RS_y []", "q_RS_z []", "q_RS_w []"]
+        new_gt_writer = csv.DictWriter(new_gt_file, fieldnames=fieldnames)
+
+        new_gt_writer.writeheader()
+        for row_dict in csv_row_dicts:
+            new_gt_writer.writerow(row_dict)
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser(description='''
+    This script runs over many sequences (specified in 'sequences') of the TUM-VI dataset. Every sequence is executed 'runs_per_sequence' times. The evaluation script from TUM-RGBD (also used by ORB-SLAM3) is used to calculate the Root Mean Square Absolute Trajectory Error (RMS ATE). The median of all runs is reported in 'rmsate_summary.txt'.
+    ''')
+    parser.add_argument(
+        "--dataset_path", help="Path to the TUM-VI dataset. Should lead to a directory that contains the folders '1024_16' and '512_16'.")
+    parser.add_argument(
+        "--resolution", help="Either '1024_16' or '512_16'. Default: '512_16'", default="512_16")
+    parser.add_argument('--runs_per_sequence', type=int,
+                        help='How often every sequence should be evaluated. Default: 3', default=3)
+    args = parser.parse_args()
+
+    dir_path = os.path.dirname(os.path.realpath(__file__))
+
+    if os.path.isfile("%s/rmsate_summary.txt" % dir_path):
+        print("An old version of 'rmsate_summary.txt' exists. Going to delete it.")
+        send2trash("%s/rmsate_summary.txt" % dir_path)
+
+    with open("%s/rmsate_summary.txt" % dir_path, "a") as summary_file:
+        summary_file.write(
+            "#sequence name: median RMS ATE, fail count / runs per sequence\n")
+
+    # run over all sequences
+    for sequence in sequences:
+
+        sequence_path = "%s/%s/dataset-%s_%s" % (
+            args.dataset_path, args.resolution, sequence, args.resolution)
+
+        print("Looking for a sequence in %s" % sequence_path)
+
+        # initialize statistics
+        rmsates = np.zeros(args.runs_per_sequence, dtype=np.float64)
+        fail_count = 0
+
+        # execute this sequence runs_per_sequence times
+        for run_number in range(args.runs_per_sequence):
+            print("Running DSO on sequence %s run number %d" %
+                  (sequence, run_number + 1))
+
+            failed = False
+
+            gt_file_name = "%s/gt_%s.txt" % (dir_path, sequence)
+            convert_gt(gt_file_name, sequence_path)
+
+            # the result.txt file is the indicator if a run was successful
+            # we delete it now to see if a new file exists after DSO finished
+            if os.path.isfile("%s/result.txt" % dir_path):
+                os.remove("%s/result.txt" % dir_path)
+
+            # execute DSO
+            subprocess.run(["%s/../build/bin/dso_dataset" % dir_path,
+                            "files=%s/dso/cam0/images" % sequence_path,
+                            "calib=%s/dso/cam0/camera.txt" % sequence_path,
+                            "gamma=%s/dso/cam0/pcalib.txt" % sequence_path,
+                            "vignette=%s/dso/cam0/vignette.png" % sequence_path,
+                            "preset=0",
+                            "mode=0",
+                            "quiet=1",
+                            "nolog=1",
+                            "nogui=1"],
+                           cwd=dir_path)
+
+            # indicator if the run was successful
+            if not os.path.isfile("%s/result.txt" % dir_path):
+                failed = True
+                fail_count += 1
+                print("DSO on sequence %s run number %d FAILED" %
+                      (sequence, run_number + 1))
+
+            if not failed:
+                print("Calculating RMS ATE for %s run number %d" %
+                      (sequence, run_number + 1))
+
+                # Calculate RMS ATE by using the evaluation script from TUM-RGBD (also used by ORB-SLAM3)
+                evaluate_ate_scale_proc = subprocess.Popen(["python2", "-u",
+                                                            "%s/evaluate_ate_scale.py" % dir_path,
+                                                            gt_file_name,
+                                                            "%s/result.txt" % dir_path,
+                                                            "--max_difference", "0.1",
+                                                            "--plot", "plot-%s.svg" % sequence],
+                                                           cwd=dir_path,
+                                                           universal_newlines=True, stdout=subprocess.PIPE,
+                                                           stderr=subprocess.STDOUT)
+
+                stdout = evaluate_ate_scale_proc.communicate()[0]
+                # parse the output of the evaluation script
+                try:
+                    rmsates[run_number] = float(stdout.rstrip().split(',')[2])
+                    print("RMSATE: %f" % rmsates[run_number])
+                except (ValueError, IndexError):
+                    print(stdout)
+
+        # get median of runs
+        median = np.NaN
+        if fail_count < args.runs_per_sequence:
+            median = np.median(rmsates[rmsates != 0])
+
+        # write statistics
+        with open("%s/rmsate_summary.txt" % dir_path, "a") as summary_file:
+            summary_file.write("%s: %f, %d/%d\n" % (sequence,
+                                                    median, fail_count, args.runs_per_sequence))
+
+        print("median RMS ATE of %s: %f" % (sequence, median))
+        print("failed %d/%d" % (fail_count, args.runs_per_sequence))
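For illustration, when evaluate_ate_scale.py is called with neither --verbose nor --verbose2 (as above), it emits a single CSV line "RMS ATE without scale correction, optimal scale, RMS ATE with scale correction"; tum-vi_run_all.py keeps field [2], the scale-corrected error, via split(',')[2]. An invented sample line:

    0.103127,0.482331,0.041209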
diff --git a/src/FullSystem/CoarseTracker.cpp b/src/FullSystem/CoarseTracker.cpp
index e56662586..e128cf0c4 100644
--- a/src/FullSystem/CoarseTracker.cpp
+++ b/src/FullSystem/CoarseTracker.cpp
@@ -721,27 +721,39 @@ bool CoarseTracker::trackNewestCoarse(
 void CoarseTracker::debugPlotIDepthMap(float* minID_pt, float* maxID_pt, std::vector<IOWrap::Output3DWrapper*> &wraps)
 {
-	if(w[1] == 0) return;
+	if(w[1] == 0) return;
+	const int lvl = 0;
+	const int wl = w[lvl];
+	const int hl = h[lvl];
 
-	int lvl = 0;
+	std::vector<float> allID;
+	for(int i=0; i < (hl*wl); i++)
+	{
+		if(idepth[lvl][i] > 0)
+			allID.push_back(idepth[lvl][i]);
+	}
+
+	// Initialise debug image
+	MinimalImageB3 mf(wl, hl);
+	mf.setBlack();
+	for(int i=0; i < (hl*wl); i++)
+	{
+		int c = lastRef->dIp[lvl][i][0]*0.9f;
+		if(c>255) c=255;
+		mf.at(i) = Vec3b(c,c,c);
+	}
+
+	// Check in case all idepths are 0
+	if(!allID.empty())
 	{
-		std::vector<float> allID;
-		for(int i=0;i<h[lvl]*w[lvl];i++)
-		{
-			if(idepth[lvl][i] > 0)
-				allID.push_back(idepth[lvl][i]);
-		}
 		std::sort(allID.begin(), allID.end());
-		int n = allID.size()-1;
-		float minID_new = allID[(int)(n*0.05)];
-		float maxID_new = allID[(int)(n*0.95)];
+		const int n = allID.size()-1;
 
-		float minID, maxID;
-		minID = minID_new;
-		maxID = maxID_new;
+		float minID = allID[(int)(n * 0.05)],
+		      maxID = allID[(int)(n * 0.95)];
 
 		if(minID_pt!=0 && maxID_pt!=0)
 		{
 			if(*minID_pt < 0 || *maxID_pt < 0)
@@ -751,38 +763,26 @@ void CoarseTracker::debugPlotIDepthMap(float* minID_pt, float* maxID_pt, std::ve
 			}
 			else
 			{
-				// slowly adapt: change by maximum 10% of old span.
-				float maxChange = 0.3*(*maxID_pt - *minID_pt);
-
+				const float maxChange = 0.3*(*maxID_pt - *minID_pt);
+
 				if(minID < *minID_pt - maxChange) minID = *minID_pt - maxChange;
 				if(minID > *minID_pt + maxChange) minID = *minID_pt + maxChange;
-
-
+
 				if(maxID < *maxID_pt - maxChange) maxID = *maxID_pt - maxChange;
 				if(maxID > *maxID_pt + maxChange) maxID = *maxID_pt + maxChange;
-
+
 				*maxID_pt = maxID;
 				*minID_pt = minID;
 			}
 		}
-
-		MinimalImageB3 mf(w[lvl], h[lvl]);
-		mf.setBlack();
-		for(int i=0;i<w[lvl]*h[lvl];i++)
-		{
-			int c = lastRef->dIp[lvl][i][0]*0.9f;
-			if(c>255) c=255;
-			mf.at(i) = Vec3b(c,c,c);
-		}
-		int wl = w[lvl];
-		for(int y=3;y<h[lvl]-3;y++)
+		for(int y=3;y<hl-3;y++)
 			for(int x=3;x<wl-3;x++)
 			{
 				int idx = x+y*wl;
 				float sid=0, nid=0;
 				float* bp = idepth[lvl]+idx;
 
 				if(bp[0] > 0) {sid+=bp[0]; nid++;}
 				if(bp[1] > 0) {sid+=bp[1]; nid++;}
 				if(bp[-1] > 0) {sid+=bp[-1]; nid++;}
 				if(bp[wl] > 0) {sid+=bp[wl]; nid++;}
 				if(bp[-wl] > 0) {sid+=bp[-wl]; nid++;}
 
 				if(bp[0] > 0 || nid >= 3)
 				{
-					float id = ((sid / nid)-minID) / ((maxID-minID));
+					const float id = ((sid / nid)-minID) / (maxID-minID);
 					mf.setPixelCirc(x,y,makeJet3B(id));
 					//mf.at(idx) = makeJet3B(id);
 				}
 			}
-		//IOWrap::displayImage("coarseDepth LVL0", &mf, false);
-
+	}
+	//IOWrap::displayImage("coarseDepth LVL0", &mf, false);
 
-	for(IOWrap::Output3DWrapper* ow : wraps)
-		ow->pushDepthImage(&mf);
-
-	if(debugSaveImages)
-	{
-		char buf[1000];
-		snprintf(buf, 1000, "images_out/predicted_%05d_%05d.png", lastRef->shell->id, refFrameID);
-		IOWrap::writeImage(buf,&mf);
-	}
+	for(IOWrap::Output3DWrapper* ow : wraps)
+		ow->pushDepthImage(&mf);
 
+	if(debugSaveImages)
+	{
+		char buf[1000];
+		snprintf(buf, 1000, "images_out/predicted_%05d_%05d.png", lastRef->shell->id, refFrameID);
+		IOWrap::writeImage(buf,&mf);
 	}
 }
diff --git a/src/FullSystem/FullSystem.cpp b/src/FullSystem/FullSystem.cpp
index 516f6ccec..31bd54899 100644
--- a/src/FullSystem/FullSystem.cpp
+++ b/src/FullSystem/FullSystem.cpp
@@ -1141,10 +1141,11 @@ void FullSystem::makeKeyFrame( FrameHessian* fh)
 		coarseTracker_forNewKF->makeK(&Hcalib);
 		coarseTracker_forNewKF->setCoarseTrackingRef(frameHessians);
-
-
-		coarseTracker_forNewKF->debugPlotIDepthMap(&minIdJetVisTracker, &maxIdJetVisTracker, outputWrapper);
-		coarseTracker_forNewKF->debugPlotIDepthMapFloat(outputWrapper);
+		if(!outputWrapper.empty() || debugSaveImages)
+		{
+			coarseTracker_forNewKF->debugPlotIDepthMap(&minIdJetVisTracker, &maxIdJetVisTracker, outputWrapper);
+			coarseTracker_forNewKF->debugPlotIDepthMapFloat(outputWrapper);
+		}
 	}
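For illustration, the reworked debugPlotIDepthMap above colours pixels by inverse depth normalized to the 5th-95th percentile of the valid values, and lets the bounds move by at most 30% of the previous span per call. The same arithmetic as a numpy sketch (function name and data invented):

    import numpy as np

    def jet_bounds(idepths, prev_min=None, prev_max=None):
        """Return (minID, maxID) for colormap normalization."""
        valid = np.sort(idepths[idepths > 0])
        n = valid.size - 1
        min_id, max_id = valid[int(n * 0.05)], valid[int(n * 0.95)]
        if prev_min is not None and prev_max is not None:
            max_change = 0.3 * (prev_max - prev_min)   # limit jitter between frames
            min_id = np.clip(min_id, prev_min - max_change, prev_min + max_change)
            max_id = np.clip(max_id, prev_max - max_change, prev_max + max_change)
        return min_id, max_id

    print(jet_bounds(np.array([0.0, 0.1, 0.2, 0.4, 0.8, 1.6])))  # -> (0.1, 0.8)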
diff --git a/src/IOWrapper/Output3DWrapper.h b/src/IOWrapper/Output3DWrapper.h
index d3ddf1531..897ab3ad1 100644
--- a/src/IOWrapper/Output3DWrapper.h
+++ b/src/IOWrapper/Output3DWrapper.h
@@ -127,7 +127,7 @@ class Output3DWrapper
  * Calling:
  * 		Always called, no overhead if not used.
  */
-        virtual void publishGraph(const std::map<uint64_t, Eigen::Vector2i, std::less<uint64_t>, Eigen::aligned_allocator<std::pair<uint64_t, Eigen::Vector2i>>> &connectivity) {}
+        virtual void publishGraph(const std::map<uint64_t, Eigen::Vector2i, std::less<uint64_t>, Eigen::aligned_allocator<std::pair<const uint64_t, Eigen::Vector2i>>> &connectivity) {}
 
diff --git a/src/OptimizationBackend/EnergyFunctional.h b/src/OptimizationBackend/EnergyFunctional.h
index 16d8bb920..93e463089 100644
--- a/src/OptimizationBackend/EnergyFunctional.h
+++ b/src/OptimizationBackend/EnergyFunctional.h
@@ -117,7 +117,7 @@ class EnergyFunctional {
 	std::map<uint64_t,
 	  Eigen::Vector2i,
 	  std::less<uint64_t>,
-	  Eigen::aligned_allocator<std::pair<uint64_t, Eigen::Vector2i>>
+	  Eigen::aligned_allocator<std::pair<const uint64_t, Eigen::Vector2i>>
 	  > connectivityMap;
 
 private: