From 069d78c7a0f115af354f66629c621da731a6a724 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20San=20Jos=C3=A9=20Pro?= Date: Thu, 6 Jul 2023 19:25:58 +0200 Subject: [PATCH 1/6] Reduced size of point cloud points --- test_multiview.m | 2 +- test_scaling_from_two_points.m | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test_multiview.m b/test_multiview.m index f34f223..ab4bace 100644 --- a/test_multiview.m +++ b/test_multiview.m @@ -5,7 +5,7 @@ dataDir = "test/delivery_area_dslr_undistorted"; if exist('images','var') == 0 % Load the images if they are not already loaded yet - images = util.loadImages(dataDir + "/images", log=true, numImages=15); + images = util.loadImages(dataDir + "/images", log=true, numImages=2); end % Load the camera parameters diff --git a/test_scaling_from_two_points.m b/test_scaling_from_two_points.m index 10820e3..a6763ca 100644 --- a/test_scaling_from_two_points.m +++ b/test_scaling_from_two_points.m @@ -29,7 +29,7 @@ % Plot the point cloud unscaled as well as the points of the door frame figure -plotting.plotPointCloud(denoisedPointCloud, camPoses, cameraSizePlotSize=0.2, pcMarkerSize=20) +plotting.plotPointCloud(denoisedPointCloud, camPoses, cameraSizePlotSize=0.2, pcMarkerSize=10) plotting.plotPointCloud(pointCloud(markedPoints, Color=[0,0,1]), [], pcMarkerSize=500); xlabel('X [Unknown]'); ylabel('Y [Unknown]'); @@ -38,7 +38,7 @@ % Plot the point cloud scaled figure -plotting.plotPointCloud(denoisedPointCloudScaled, camPosesScaled, cameraSizePlotSize=0.2*scalingFactor, pcMarkerSize=20*scalingFactor) +plotting.plotPointCloud(denoisedPointCloudScaled, camPosesScaled, cameraSizePlotSize=0.2*scalingFactor, pcMarkerSize=10) xlabel('X [m]'); ylabel('Y [m]'); zlabel('Z [m]'); From 12507ba8a932fcc27114ecefa9de375a53bf5bc1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20San=20Jos=C3=A9=20Pro?= Date: Thu, 6 Jul 2023 19:26:46 +0200 Subject: [PATCH 2/6] First extract features of all images (and save them), then reconstruct --- +logic/+reconstruct3D/extractFeatures.m | 43 ++++++++++++++ +logic/reconstruct3DMultiview.m | 75 +++++++++++++++---------- 2 files changed, 89 insertions(+), 29 deletions(-) create mode 100644 +logic/+reconstruct3D/extractFeatures.m diff --git a/+logic/+reconstruct3D/extractFeatures.m b/+logic/+reconstruct3D/extractFeatures.m new file mode 100644 index 0000000..e818e84 --- /dev/null +++ b/+logic/+reconstruct3D/extractFeatures.m @@ -0,0 +1,43 @@ +function [points, features] = extractFeatures(image, ~, varargin) +% EXTRACTFEATURES Extracts features from the image +% [points, features] = EXTRACTFEATURES(image, prevFeatures, method, numOctaves, roiBorder) +% extracts features from the image using the specified method. +% Inputs: +% image - The image from which the features should be extracted +% prevFeatures - The features from the previous image +% method = 'SURF' - The method used to extract the features. 
Can be 'SURF', +% 'FAST', 'Harris' or 'BRISK' +% numOctaves = 20 - The number of octaves used for SURF +% roiBorder = 20 - The border around the image that is not used for SURF +% Outputs: +% points - The points of the features +% features - The features + +p = inputParser; +p.addOptional('method', 'SURF', @(x) any(validatestring(x, {'SURF', 'FAST', 'Harris', 'BRISK'}))); +p.addOptional('numOctaves', 20); +p.addOptional('roiBorder', 20); +p.parse(varargin{:}); +method = p.Results.method; +numOctaves = p.Results.numOctaves; +roiBorder = p.Results.roiBorder; + +% Detect features in the images +% https://de.mathworks.com/help/vision/ug/point-feature-types.html +if strcmp(p.Results.method, 'SURF') + roi = [roiBorder, roiBorder, size(image, 2) - 2 * roiBorder, size(image, 1) - 2 * roiBorder]; + points = detectSURFFeatures((image), NumOctaves=numOctaves, ROI=roi); + features = extractFeatures(image, points); +elseif strcmp(p.Results.method, 'FAST') + points = detectFASTFeatures(image); + features = extractFeatures(image, points); +elseif strcmp(p.Results.method, 'Harris') + points = detectHarrisFeatures(image); + features = extractFeatures(image, points); +elseif strcmp(p.Results.method, 'BRISK') + points = detectBRISKFeatures(image); + features = extractFeatures(image, points); +end + + +end \ No newline at end of file diff --git a/+logic/reconstruct3DMultiview.m b/+logic/reconstruct3DMultiview.m index cbe23cc..518cd1c 100644 --- a/+logic/reconstruct3DMultiview.m +++ b/+logic/reconstruct3DMultiview.m @@ -24,6 +24,7 @@ p.addOptional('scalingFactor', 0.5); %% Reconstruction Params % Feature extraction parameters +p.addOptional('featureExtractionMethod', 'SURF'); p.addOptional('numOctaves', 20); p.addOptional('roiBorder', 20); % Epipolar geometry parameters @@ -37,6 +38,7 @@ p.parse(varargin{:}); log = p.Results.log; scalingFactor = p.Results.scalingFactor; +featureExtractionMethod = p.Results.featureExtractionMethod; numOctaves = p.Results.numOctaves; roiBorder = p.Results.roiBorder; eMaxDistance = p.Results.eMaxDistance; @@ -48,7 +50,7 @@ numImages = length(images); - +tic; if log fprintf('Starting preprocessing\n'); end @@ -64,70 +66,85 @@ cameraParams = logic.reconstruct3D.scaleCameraParameters(cameraParams, scalingFactor, size(images{1})); if log - fprintf('\nPreprocessing finished.\n'); + fprintf('\nPreprocessing finished in %f seconds.\n', toc); end % ========================= -%% === 2. Feature Extraction First Image === -image1 = images{1}; +%% === 2. Extract Features === +% We first extract the features/points of all the images and save them on a cell +% array. This is done to avoid recomputing the features when matching the +% images. +if log + fprintf('Extracting features\n'); +end +features = cell(numImages, 1); +points = cell(numImages, 1); +for i = 1:numImages + if log + fprintf('Extracting features of image %d of %d\r', i, numImages); + end + currentImage = images{i}; + [points{i}, features{i}] = logic.reconstruct3D.extractFeatures(currentImage, ... + method=featureExtractionMethod, ... + numOctaves=numOctaves, roiBorder=roiBorder); +end +if log + fprintf('\nFeature extraction finished in %f seconds.\n', toc); +end + +% === 3. Feature Matching and triangulation === % The object vSet contains the view set that stores the views (camera pose, feature vectors and points) and the connections % (relative poses and point matches) between the views. 
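% For orientation, the imageviewset bookkeeping used from here on follows this
% pattern (a minimal sketch; viewIdx, pts and ij are hypothetical names):
%   vSet = imageviewset;                                   % empty view set
%   vSet = addView(vSet, viewIdx, absPose, Points=pts);    % pose + 2-D points
%   vSet = addConnection(vSet, viewIdx-1, viewIdx, relPose, Matches=ij);
%   tracks = findTracks(vSet);                             % tracks across views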
if log - tic; - fprintf("Generating view set and finding features of first picture\n"); + fprintf("Generating view set\n"); end -[vSet, prevFeatures, prevPoints] = logic.reconstruct3D.createViewSet(image1, numOctaves=numOctaves, roiBorder=roiBorder); +% Create an empty imageviewset object to manage the data associated with each view. +% Add the first view. Place the camera associated with the first view and the origin, oriented along the Z-axis. +vSet = imageviewset; +vSet = addView(vSet, 1, rigidtform3d, Points=points{1}); + -% === 3. Feature Extraction and matching remaining Images === for i = 2:numImages if log - fprintf('Matching points of image %d of %d\r', i, numImages); + fprintf('Matching points of image %d of %d and triangulation with previous images. \r', i, numImages); end - % Load the current image and extract common features to all previous views. - currentImage = images{i}; - - [matchedPoints1, matchedPoints2, currPoints, currFeatures, indexPairs] = logic.reconstruct3D.extractCommonFeaturesMultiView(currentImage, prevFeatures, prevPoints, ... - numOctaves=numOctaves, roiBorder=roiBorder); + % Match the features between the previous and the current image. + indexPairs = matchFeatures(features{i-1}, features{i}, 'Unique', true); + matchedPointsPrev = points{i-1}(indexPairs(:, 1)); + matchedPointsCurr = points{i}(indexPairs(:, 2)); % Estimate the camera pose of current view relative to the previous view. % The pose is computed up to scale, meaning that the distance between % the cameras in the previous view and the current view is set to 1. % This will be corrected by the bundle adjustment. - [E, relPose, status, inlierIdx] = logic.reconstruct3D.getEpipolarGeometry(matchedPoints1, matchedPoints2, cameraParams, ... + [E, relPose, status, inlierIdx] = logic.reconstruct3D.getEpipolarGeometry(matchedPointsPrev, matchedPointsCurr, cameraParams, ... eMaxDistance=eMaxDistance, eConfidence=eConfidence, eMaxNumTrials=eMaxNumTrials, eValidPointFraction=eValidPointFraction); - % Get the table containing the previous camera pose. + + % Get the table containing the previous camera pose. prevPose = poses(vSet, i-1).AbsolutePose; - % Compute the current camera pose in the global coordinate system % relative to the first view. - if length(relPose) > 1 % TODO: BUG sometimes two poses are returned. This is a workaround + if length(relPose) > 1 % TODO: BUG sometimes two poses are returned. This is a workaround. size(relPose) relPose = relPose(1); end currPose = rigidtform3d(prevPose.A * relPose.A); - + % Add the current view to the view set. - vSet = addView(vSet, i, currPose, Points=currPoints); - + vSet = addView(vSet, i, currPose, Points=points{i}); % Store the point matches between the previous and the current views. vSet = addConnection(vSet, i-1, i, relPose, Matches=indexPairs(inlierIdx, :)); - - % Find point tracks across all views. - tracks = findTracks(vSet); - + tracks = findTracks(vSet); % Find point tracks across all views. % Get the table containing camera poses for all views. camPoses = poses(vSet); + % Triangulate initial locations for the 3-D world points and do bundle adjustment. [pointCloudInstance, camPoses] = logic.reconstruct3D.getTriangulatedPointsMultiView(tracks, camPoses, cameraParams, ... maxReprojectionError=maxReprojectionError); % Store the refined camera poses. 
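    % (updateView overwrites the stored AbsolutePose of every view listed in
    % camPoses, so the next iteration chains off bundle-adjusted poses rather
    % than the raw pairwise estimates.)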
vSet = updateView(vSet, camPoses); - - prevFeatures = currFeatures; - prevPoints = currPoints; - end % Rotate the point cloud From 8a2b21cb376789ba7ecb4a5863d5340307332c39 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20San=20Jos=C3=A9=20Pro?= Date: Thu, 6 Jul 2023 21:45:28 +0200 Subject: [PATCH 3/6] Doing some test with 2 images --- +logic/+reconstruct3D/createViewSet.m | 29 ------- .../extractCommonFeaturesMultiView.m | 24 ------ +logic/reconstruct3DMultiview.m | 83 ++++++++++++++++--- 3 files changed, 73 insertions(+), 63 deletions(-) delete mode 100644 +logic/+reconstruct3D/createViewSet.m delete mode 100644 +logic/+reconstruct3D/extractCommonFeaturesMultiView.m diff --git a/+logic/+reconstruct3D/createViewSet.m b/+logic/+reconstruct3D/createViewSet.m deleted file mode 100644 index 62c0f5c..0000000 --- a/+logic/+reconstruct3D/createViewSet.m +++ /dev/null @@ -1,29 +0,0 @@ -function [vSet, prevFeatures, prevPoints] = createViewSet(image1, varargin) -% CREATEVIEWSET Create a viewSet object containing the first view. -% Inputs: -% image1 - first image -% numOctaves - number of octaves for SURF feature detection -% roiBorder - border around the image to exclude from feature detection -% Outputs: -% vSet - viewSet object containing the first view -% prevFeatures - SURF features of the first image -% prevPoints - SURF points of the first image - -p = inputParser; -p.addOptional('numOctaves', 20); -p.addOptional('roiBorder', 20); -p.parse(varargin{:}); -numOctaves = p.Results.numOctaves; -roiBorder = p.Results.roiBorder; - -roi = [roiBorder, roiBorder, size(image1, 2)- 2*roiBorder, size(image1, 1)- 2*roiBorder]; -prevPoints = detectSURFFeatures(image1, NumOctaves=numOctaves, ROI=roi); -prevFeatures = extractFeatures(image1, prevPoints); - -% Create an empty imageviewset object to manage the data associated with each view. -vSet = imageviewset; - -% Add the first view. Place the camera associated with the first view and the origin, oriented along the Z-axis. -viewId = 1; -vSet = addView(vSet, viewId, rigidtform3d, Points=prevPoints); -end diff --git a/+logic/+reconstruct3D/extractCommonFeaturesMultiView.m b/+logic/+reconstruct3D/extractCommonFeaturesMultiView.m deleted file mode 100644 index bde344e..0000000 --- a/+logic/+reconstruct3D/extractCommonFeaturesMultiView.m +++ /dev/null @@ -1,24 +0,0 @@ -function [matchedPoints1, matchedPoints2, currPoints, currFeatures, indexPairs] = extractCommonFeaturesMultiView(image, prevFeatures, prevPoints, ~, varargin) - % Extracts common features from two images and returns - % the essential matrix and the inlier points. - - p = inputParser; - p.addOptional('numOctaves', 20); - p.addOptional('roiBorder', 20); - p.parse(varargin{:}); - numOctaves = p.Results.numOctaves; - roiBorder = p.Results.roiBorder; - - % Detect features in the images - % TODO: add a parameter to try different feature extraction methods - % TODO: consider changing detection algorithm and getting different point types - % https://de.mathworks.com/help/vision/ug/point-feature-types.html - roi = [roiBorder, roiBorder, size(image, 2) - 2 * roiBorder, size(image, 1) - 2 * roiBorder]; - currPoints = detectSURFFeatures((image), NumOctaves=numOctaves, ROI=roi); - currFeatures = extractFeatures(image, currPoints); - indexPairs = matchFeatures(prevFeatures, currFeatures); - - % Select matched points. 
- matchedPoints1 = prevPoints(indexPairs(:, 1)); - matchedPoints2 = currPoints(indexPairs(:, 2)); -end \ No newline at end of file diff --git a/+logic/reconstruct3DMultiview.m b/+logic/reconstruct3DMultiview.m index 518cd1c..2611465 100644 --- a/+logic/reconstruct3DMultiview.m +++ b/+logic/reconstruct3DMultiview.m @@ -21,7 +21,7 @@ p = inputParser; p.addOptional('log', true); %% Preprocessing Params -p.addOptional('scalingFactor', 0.5); +p.addOptional('scalingFactor', 0.1); %% Reconstruction Params % Feature extraction parameters p.addOptional('featureExtractionMethod', 'SURF'); @@ -48,7 +48,7 @@ maxReprojectionError = p.Results.maxReprojectionError; % ========================= - +imagesOriginal = images; numImages = length(images); tic; if log @@ -59,8 +59,8 @@ if log fprintf('Analyzing image %d of %d\r', i, numImages); end - images{i} = imresize(images{i}, scalingFactor); - images{i} = logic.reconstruct3D.preprocessImage(images{i}); + imagesOriginal{i} = imresize(imagesOriginal{i}, scalingFactor); + images{i} = logic.reconstruct3D.preprocessImage(imagesOriginal{i}); end % Modify camera parameters to compensate for image resizing cameraParams = logic.reconstruct3D.scaleCameraParameters(cameraParams, scalingFactor, size(images{1})); @@ -130,7 +130,7 @@ relPose = relPose(1); end currPose = rigidtform3d(prevPose.A * relPose.A); - + % Add the current view to the view set. vSet = addView(vSet, i, currPose, Points=points{i}); % Store the point matches between the previous and the current views. @@ -147,14 +147,77 @@ vSet = updateView(vSet, camPoses); end -% Rotate the point cloud -R = [1 0 0; 0 0 1; 0 -1 0]; -tform = affinetform3d([R, zeros(3, 1); zeros(1, 3), 1]); -[pointCloudInstance, camPoses] = logic.reconstruct3D.transformScene(pointCloudInstance, camPoses, tform); +minZ = min(pointCloudInstance.Location(:, 3)) +maxZ = max(pointCloudInstance.Location(:, 3)) +% We will use this to filter out points that are too far away. +% after doing dense reconstruction. -% TODO: Add an extra step to generate more features once we get the full 3D reconstruction if log fprintf('\n3D reconstruction finished after %.2f seconds.\n', toc); end + + +% === 4. Dense reconstruction === +if log + fprintf('Dense reconstruction\n'); +end +% Create a dense point cloud from the triangulated points and camera poses. +for i = 1:1 + if log + fprintf('Dense reconstruction of image %d of %d \r', i, numImages-1); + end + image1 = imagesOriginal{i}; + image2 = imagesOriginal{i+1}; + figure + imshowpair(image1, image2, 'montage'); + + % Compute stereo parameters and rectify the stereo images. + relPose = rigidtform3d(poses(vSet, i+1).AbsolutePose.A); + stereoParams = stereoParameters(cameraParams, cameraParams, relPose); + %[image1Rect, image2Rect] = rectifyStereoImages(rgb2gray(image1), rgb2gray(image2), stereoParams); + [image1Rect, image2Rect] = rectifyStereoImages(image1, image2, stereoParams); + imshowpair(image1Rect, image2Rect, 'montage'); + + % Compute disparity. + disparityMap = disparitySGM(rgb2gray(image1Rect), rgb2gray(image2Rect)); + % Remove left most column of zeros from disparity map and corresponding pixels from the rectified image. + figure + % imshow(disparityMap); + % colormap jet + % colorbar + + % Reconstruct the 3-D world coordinates of points corresponding to each pixel from the disparity map. 
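+    % (reconstructScene returns an M-by-N-by-3 array of [x y z] locations in
+    % the rectified camera-1 frame; pixels whose disparity was marked
+    % unreliable by disparitySGM carry no valid depth, which is why the
+    % range check on Z below is needed.)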
+ xyzPoints = reconstructScene(disparityMap, stereoParams); + numColumnsToRemove = 200; + xyzPoints = xyzPoints(:, numColumnsToRemove:end, :); + disparityMap = disparityMap(:, numColumnsToRemove:end); + % imshow(disparityMap); + + + % Filter points that are too far away and transform the MxNx3 matrix into a Nx3 matrix. + xyzPoints = reshape(xyzPoints, [], 3); + pixelColors = reshape(image1Rect, [], 3); + size(xyzPoints) + size(pixelColors) + validIdx = xyzPoints(:, 3) < maxZ & xyzPoints(:, 3) > minZ; + xyzPoints = xyzPoints(validIdx, :); + pixelColors = pixelColors(validIdx, :); + + + % create a pointCloud object and assing the color of the original image at the corresponding pixel + size(xyzPoints) + size(pixelColors) + % pointCloudInstance = pointCloud(xyzPoints, Color=pixelColors); + pointCloudInstance = pointCloud(xyzPoints); +end +if log + fprintf('\nDense reconstruction finished after %.2f seconds.\n', toc); +end + +% Rotate the point cloud +R = [1 0 0; 0 0 1; 0 -1 0]; +tform = affinetform3d([R, zeros(3, 1); zeros(1, 3), 1]); +[pointCloudInstance, camPoses] = logic.reconstruct3D.transformScene(pointCloudInstance, camPoses, tform); + end \ No newline at end of file From e7c36c03226347d2504bb8344a08a3c850d48825 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20San=20Jos=C3=A9=20Pro?= Date: Fri, 7 Jul 2023 09:21:21 +0200 Subject: [PATCH 4/6] Further tests, not really usable still --- +logic/reconstruct3DMultiview.m | 41 +++++++++++++++------------------ test_multiview.m | 2 +- test_plot.m | 7 ++++++ 3 files changed, 27 insertions(+), 23 deletions(-) create mode 100644 test_plot.m diff --git a/+logic/reconstruct3DMultiview.m b/+logic/reconstruct3DMultiview.m index 2611465..05afee6 100644 --- a/+logic/reconstruct3DMultiview.m +++ b/+logic/reconstruct3DMultiview.m @@ -21,7 +21,7 @@ p = inputParser; p.addOptional('log', true); %% Preprocessing Params -p.addOptional('scalingFactor', 0.1); +p.addOptional('scalingFactor', 0.25); %% Reconstruction Params % Feature extraction parameters p.addOptional('featureExtractionMethod', 'SURF'); @@ -163,53 +163,50 @@ fprintf('Dense reconstruction\n'); end % Create a dense point cloud from the triangulated points and camera poses. -for i = 1:1 +for i = 1:(numImages-1) if log fprintf('Dense reconstruction of image %d of %d \r', i, numImages-1); end image1 = imagesOriginal{i}; image2 = imagesOriginal{i+1}; - figure - imshowpair(image1, image2, 'montage'); + % figure + % imshowpair(image1, image2, 'montage'); % Compute stereo parameters and rectify the stereo images. - relPose = rigidtform3d(poses(vSet, i+1).AbsolutePose.A); + relPose = rigidtform3d(poses(vSet, i).AbsolutePose.A \ poses(vSet, i+1).AbsolutePose.A); stereoParams = stereoParameters(cameraParams, cameraParams, relPose); %[image1Rect, image2Rect] = rectifyStereoImages(rgb2gray(image1), rgb2gray(image2), stereoParams); - [image1Rect, image2Rect] = rectifyStereoImages(image1, image2, stereoParams); - imshowpair(image1Rect, image2Rect, 'montage'); + [image1Rect, image2Rect, ~, camMatrix1] = rectifyStereoImages(image1, image2, stereoParams); + % imshowpair(image1Rect, image2Rect, 'montage'); % Compute disparity. disparityMap = disparitySGM(rgb2gray(image1Rect), rgb2gray(image2Rect)); % Remove left most column of zeros from disparity map and corresponding pixels from the rectified image. - figure + % figure % imshow(disparityMap); % colormap jet % colorbar % Reconstruct the 3-D world coordinates of points corresponding to each pixel from the disparity map. 
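    % (In this version the color lookup is done by projecting each surviving
    % 3-D point back into the rectified left image with camMatrix1 from
    % rectifyStereoImages and sampling the RGB values there with impixel.)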
xyzPoints = reconstructScene(disparityMap, stereoParams); - numColumnsToRemove = 200; - xyzPoints = xyzPoints(:, numColumnsToRemove:end, :); - disparityMap = disparityMap(:, numColumnsToRemove:end); - % imshow(disparityMap); - - % Filter points that are too far away and transform the MxNx3 matrix into a Nx3 matrix. + % Filter points that are too far away and transform the MxNx3 matrix into a Nx3 matrix + roiBorder = 50; % TODO: make this a parameter + roi = zeros(size(disparityMap)); + roi(roiBorder:end-roiBorder, roiBorder:end-roiBorder) = 1; xyzPoints = reshape(xyzPoints, [], 3); - pixelColors = reshape(image1Rect, [], 3); - size(xyzPoints) - size(pixelColors) - validIdx = xyzPoints(:, 3) < maxZ & xyzPoints(:, 3) > minZ; + roi = reshape(roi, [], 1); + % validIdx = xyzPoints(:, 3) < 1.5*maxZ & xyzPoints(:, 3) > 0.5*minZ & roi; + validIdx = xyzPoints(:, 3) < maxZ & roi; xyzPoints = xyzPoints(validIdx, :); - pixelColors = pixelColors(validIdx, :); + xyzPointsInImage1 = (camMatrix1 * [xyzPoints, ones(size(xyzPoints, 1), 1)]')'; + xyzPointsInImage1 = floor(xyzPointsInImage1(:, 2:-1:1) ./ xyzPointsInImage1(:, 3)); + pixelColors = impixel(image1Rect, xyzPointsInImage1(: , 1), xyzPointsInImage1(: , 2)); % create a pointCloud object and assing the color of the original image at the corresponding pixel size(xyzPoints) - size(pixelColors) - % pointCloudInstance = pointCloud(xyzPoints, Color=pixelColors); - pointCloudInstance = pointCloud(xyzPoints); + pointCloudInstance = pccat([pointCloudInstance, pointCloud(xyzPoints, Color=pixelColors)]); end if log fprintf('\nDense reconstruction finished after %.2f seconds.\n', toc); diff --git a/test_multiview.m b/test_multiview.m index ab4bace..9eab795 100644 --- a/test_multiview.m +++ b/test_multiview.m @@ -5,7 +5,7 @@ dataDir = "test/delivery_area_dslr_undistorted"; if exist('images','var') == 0 % Load the images if they are not already loaded yet - images = util.loadImages(dataDir + "/images", log=true, numImages=2); + images = util.loadImages(dataDir + "/images", log=true, numImages=3); end % Load the camera parameters diff --git a/test_plot.m b/test_plot.m new file mode 100644 index 0000000..d64c7ef --- /dev/null +++ b/test_plot.m @@ -0,0 +1,7 @@ +% Plot the point cloud unscaled as well as the points of the door frame +figure +plotting.plotPointCloud(denoisedPointCloud, camPoses, cameraSizePlotSize=0.2, pcMarkerSize=10) +xlabel('X [Unknown]'); +ylabel('Y [Unknown]'); +zlabel('Z [Unknown]'); +hold off \ No newline at end of file From e320bb8c41f700ab1a68859fff69a7b2eed110c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20San=20Jos=C3=A9=20Pro?= Date: Fri, 7 Jul 2023 19:19:37 +0200 Subject: [PATCH 5/6] Removed the denseMatching for now. Doing this in a different branch --- +logic/+reconstruct3D/denseMatching.m | 60 +++++++++++++++++++++++++++ +logic/reconstruct3DMultiview.m | 59 -------------------------- test_multiview.m | 2 +- test_plot.m | 2 +- 4 files changed, 62 insertions(+), 61 deletions(-) create mode 100644 +logic/+reconstruct3D/denseMatching.m diff --git a/+logic/+reconstruct3D/denseMatching.m b/+logic/+reconstruct3D/denseMatching.m new file mode 100644 index 0000000..1d9039c --- /dev/null +++ b/+logic/+reconstruct3D/denseMatching.m @@ -0,0 +1,60 @@ +function pointCloudDense = denseMatching(pointCloudInstance, imagesOriginal, cameraParams) +% TODO: add description + +% === 4. Dense reconstruction === +if log + fprintf('Dense reconstruction\n'); +end +% Create a dense point cloud from the triangulated points and camera poses. 
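+% NOTE: as extracted, the body below still references log, numImages, vSet and
+% maxZ (and the caller's tic/toc timer) from the calling workspace; they would
+% have to be passed in as arguments or given local defaults before this
+% function can run standalone.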
+numImagesDenseRec = 2; +for i = 1:(numImagesDenseRec-1) + if log + fprintf('Dense reconstruction of image %d of %d \r', i, numImages-1); + end + image1 = imagesOriginal{i}; + image2 = imagesOriginal{i+1}; + % figure + % imshowpair(image1, image2, 'montage'); + + % Compute stereo parameters and rectify the stereo images. + relPose = rigidtform3d(poses(vSet, i).AbsolutePose.A \ poses(vSet, i+1).AbsolutePose.A); + stereoParams = stereoParameters(cameraParams, cameraParams, relPose); + %[image1Rect, image2Rect] = rectifyStereoImages(rgb2gray(image1), rgb2gray(image2), stereoParams); + [image1Rect, image2Rect, ~, camMatrix1] = rectifyStereoImages(image1, image2, stereoParams); + figure + imshowpair(image1Rect, image2Rect, 'montage'); + + % Compute disparity. + disparityRange = [0 128]; + disparityMap = disparitySGM(rgb2gray(image1Rect), rgb2gray(image2Rect), DisparityRange=disparityRange); + figure + imshow(disparityMap, disparityRange); + colormap jet + colorbar + + + % Reconstruct the 3-D world coordinates of points corresponding to each pixel from the disparity map. + xyzPoints = reconstructScene(disparityMap, stereoParams); + + % Filter points that are too far away and transform the MxNx3 matrix into a Nx3 matrix + roiBorder = 50; % TODO: make this a parameter + roi = zeros(size(disparityMap)); + roi(roiBorder:end-roiBorder, roiBorder:end-roiBorder) = 1; + xyzPoints = reshape(xyzPoints, [], 3); + roi = reshape(roi, [], 1); + % validIdx = xyzPoints(:, 3) < 1.5*maxZ & xyzPoints(:, 3) > 0.5*minZ & roi; + validIdx = xyzPoints(:, 3) < maxZ & roi; + xyzPoints = xyzPoints(validIdx, :); + xyzPointsInImage1 = (camMatrix1 * [xyzPoints, ones(size(xyzPoints, 1), 1)]')'; + xyzPointsInImage1 = floor(xyzPointsInImage1(:, 2:-1:1) ./ xyzPointsInImage1(:, 3)); + pixelColors = impixel(image1Rect, xyzPointsInImage1(: , 1), xyzPointsInImage1(: , 2)); + + + % create a pointCloud object and assing the color of the original image at the corresponding pixel + size(xyzPoints) + pointCloudInstance = pccat([pointCloudInstance, pointCloud(xyzPoints, Color=pixelColors)]); +end +if log + fprintf('\nDense reconstruction finished after %.2f seconds.\n', toc); +end +end diff --git a/+logic/reconstruct3DMultiview.m b/+logic/reconstruct3DMultiview.m index 05afee6..199cc26 100644 --- a/+logic/reconstruct3DMultiview.m +++ b/+logic/reconstruct3DMultiview.m @@ -103,7 +103,6 @@ vSet = imageviewset; vSet = addView(vSet, 1, rigidtform3d, Points=points{1}); - for i = 2:numImages if log fprintf('Matching points of image %d of %d and triangulation with previous images. \r', i, numImages); @@ -146,9 +145,6 @@ % Store the refined camera poses. vSet = updateView(vSet, camPoses); end - -minZ = min(pointCloudInstance.Location(:, 3)) -maxZ = max(pointCloudInstance.Location(:, 3)) % We will use this to filter out points that are too far away. % after doing dense reconstruction. @@ -157,61 +153,6 @@ fprintf('\n3D reconstruction finished after %.2f seconds.\n', toc); end - -% === 4. Dense reconstruction === -if log - fprintf('Dense reconstruction\n'); -end -% Create a dense point cloud from the triangulated points and camera poses. -for i = 1:(numImages-1) - if log - fprintf('Dense reconstruction of image %d of %d \r', i, numImages-1); - end - image1 = imagesOriginal{i}; - image2 = imagesOriginal{i+1}; - % figure - % imshowpair(image1, image2, 'montage'); - - % Compute stereo parameters and rectify the stereo images. 
- relPose = rigidtform3d(poses(vSet, i).AbsolutePose.A \ poses(vSet, i+1).AbsolutePose.A); - stereoParams = stereoParameters(cameraParams, cameraParams, relPose); - %[image1Rect, image2Rect] = rectifyStereoImages(rgb2gray(image1), rgb2gray(image2), stereoParams); - [image1Rect, image2Rect, ~, camMatrix1] = rectifyStereoImages(image1, image2, stereoParams); - % imshowpair(image1Rect, image2Rect, 'montage'); - - % Compute disparity. - disparityMap = disparitySGM(rgb2gray(image1Rect), rgb2gray(image2Rect)); - % Remove left most column of zeros from disparity map and corresponding pixels from the rectified image. - % figure - % imshow(disparityMap); - % colormap jet - % colorbar - - % Reconstruct the 3-D world coordinates of points corresponding to each pixel from the disparity map. - xyzPoints = reconstructScene(disparityMap, stereoParams); - - % Filter points that are too far away and transform the MxNx3 matrix into a Nx3 matrix - roiBorder = 50; % TODO: make this a parameter - roi = zeros(size(disparityMap)); - roi(roiBorder:end-roiBorder, roiBorder:end-roiBorder) = 1; - xyzPoints = reshape(xyzPoints, [], 3); - roi = reshape(roi, [], 1); - % validIdx = xyzPoints(:, 3) < 1.5*maxZ & xyzPoints(:, 3) > 0.5*minZ & roi; - validIdx = xyzPoints(:, 3) < maxZ & roi; - xyzPoints = xyzPoints(validIdx, :); - xyzPointsInImage1 = (camMatrix1 * [xyzPoints, ones(size(xyzPoints, 1), 1)]')'; - xyzPointsInImage1 = floor(xyzPointsInImage1(:, 2:-1:1) ./ xyzPointsInImage1(:, 3)); - pixelColors = impixel(image1Rect, xyzPointsInImage1(: , 1), xyzPointsInImage1(: , 2)); - - - % create a pointCloud object and assing the color of the original image at the corresponding pixel - size(xyzPoints) - pointCloudInstance = pccat([pointCloudInstance, pointCloud(xyzPoints, Color=pixelColors)]); -end -if log - fprintf('\nDense reconstruction finished after %.2f seconds.\n', toc); -end - % Rotate the point cloud R = [1 0 0; 0 0 1; 0 -1 0]; tform = affinetform3d([R, zeros(3, 1); zeros(1, 3), 1]); diff --git a/test_multiview.m b/test_multiview.m index 9eab795..f34f223 100644 --- a/test_multiview.m +++ b/test_multiview.m @@ -5,7 +5,7 @@ dataDir = "test/delivery_area_dslr_undistorted"; if exist('images','var') == 0 % Load the images if they are not already loaded yet - images = util.loadImages(dataDir + "/images", log=true, numImages=3); + images = util.loadImages(dataDir + "/images", log=true, numImages=15); end % Load the camera parameters diff --git a/test_plot.m b/test_plot.m index d64c7ef..6721693 100644 --- a/test_plot.m +++ b/test_plot.m @@ -1,6 +1,6 @@ % Plot the point cloud unscaled as well as the points of the door frame figure -plotting.plotPointCloud(denoisedPointCloud, camPoses, cameraSizePlotSize=0.2, pcMarkerSize=10) +plotting.plotPointCloud(denoisedPointCloud, camPoses, cameraSizePlotSize=0.2, pcMarkerSize=10, usePcViewer=true) xlabel('X [Unknown]'); ylabel('Y [Unknown]'); zlabel('Z [Unknown]'); From 424f7956eb18e8c5b4d49608d6a127267d12886b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Daniel=20San=20Jos=C3=A9=20Pro?= Date: Fri, 7 Jul 2023 19:22:18 +0200 Subject: [PATCH 6/6] SOme final plotting things --- +plotting/plotPointCloud.m | 6 ++++-- test_multiview.m | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/+plotting/plotPointCloud.m b/+plotting/plotPointCloud.m index 77921ea..fd68777 100644 --- a/+plotting/plotPointCloud.m +++ b/+plotting/plotPointCloud.m @@ -14,16 +14,18 @@ function plotPointCloud(pointCloudInstance, camPoses, varargin) p.addOptional('usePcViewer', false, @islogical); 
p.addOptional('cameraSizePlotSize', 0.2, @isnumeric);
 p.addOptional('pcMarkerSize', 45, @isnumeric);
+p.addOptional('pcViewerMarkerSize', 5, @isnumeric);
 p.parse(varargin{:});
 
 usePcViewer = p.Results.usePcViewer;
 cameraSizePlotSize = p.Results.cameraSizePlotSize;
 pcMarkerSize = p.Results.pcMarkerSize;
+pcViewerMarkerSize = p.Results.pcViewerMarkerSize;
 
-hold on;
 if usePcViewer
-    pcviewer(pointCloudInstance, PointSize=1);
+    pcviewer(pointCloudInstance, PointSize=pcViewerMarkerSize);
 else
+    hold on;
     pcshow(pointCloudInstance, VerticalAxisDir='Down', MarkerSize=pcMarkerSize);
     % Show camera poses
     if ~isempty(camPoses)
diff --git a/test_multiview.m b/test_multiview.m
index f34f223..9eab795 100644
--- a/test_multiview.m
+++ b/test_multiview.m
@@ -5,7 +5,7 @@ dataDir = "test/delivery_area_dslr_undistorted";
 
 if exist('images','var') == 0
     % Load the images if they have not been loaded yet
-    images = util.loadImages(dataDir + "/images", log=true, numImages=15);
+    images = util.loadImages(dataDir + "/images", log=true, numImages=3);
 end
 
 % Load the camera parameters
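
For reference, a minimal end-to-end driver in the spirit of test_multiview.m might
look like the sketch below. This is only an illustration: the exact return
signature of logic.reconstruct3DMultiview and the way cameraParams is loaded are
not shown in these patches and are assumed here.

    % Hypothetical driver script (assumed signatures, see note above)
    dataDir = "test/delivery_area_dslr_undistorted";
    images = util.loadImages(dataDir + "/images", log=true, numImages=3);
    % cameraParams: assumed to be a cameraParameters object loaded elsewhere
    [pointCloudInstance, camPoses] = logic.reconstruct3DMultiview(images, cameraParams, ...
        scalingFactor=0.25, featureExtractionMethod='SURF');

    figure
    plotting.plotPointCloud(pointCloudInstance, camPoses, ...
        cameraSizePlotSize=0.2, pcMarkerSize=10);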