Last version of detecting #1332

Open · wants to merge 34 commits into base: master
34 commits
9870abc
chore: Rename project
Oct 28, 2024
39a8bdc
Feature/base changes (#1)
Andrzej-Krzyskow Oct 30, 2024
0a4ae8f
Feature/main content (#2)
Andrzej-Krzyskow Nov 9, 2024
6dbf334
feat: Adding new main screen of App (#3)
KamilTaras Nov 9, 2024
355c927
feature: Action bar modification (#4)
Andrzej-Krzyskow Nov 9, 2024
42f7e58
feature: Connect login screen with full screen video screen (#5)
Andrzej-Krzyskow Nov 9, 2024
bd7feea
feature: Add show list button in Debug mode (#6)
Andrzej-Krzyskow Nov 11, 2024
63457a8
bugfix: Added needed xmlns (#7)
Andrzej-Krzyskow Nov 11, 2024
498f02e
feature: Added LED button and logic, Modified SignIn page and Added m…
Andrzej-Krzyskow Nov 12, 2024
c3c32ed
feat: models added
Nov 16, 2024
2152f7f
feat: opencv library
Nov 16, 2024
812fbf8
feature: Improved access to Demo List (#9)
Andrzej-Krzyskow Nov 16, 2024
4cbf902
bug
Nov 16, 2024
9881012
fix: changes for safer OpenCV usage
Nov 17, 2024
ee27295
feat: basic tree trunk detection on LiveStreamView.java, model_trunk.…
Nov 17, 2024
e052a60
fix: opencv directory
Nov 17, 2024
7ae8268
fix: opencv
Nov 17, 2024
528d757
fix: opencv
Nov 17, 2024
64bccf6
fix: gitignore for opencv
Nov 17, 2024
aa20efc
fix: gitignore for opencv
Nov 17, 2024
12968fe
fix: gitignore for opencv
Nov 17, 2024
8e2564b
feature: Circular-error-probable (#10)
KamilTaras Nov 18, 2024
720ecca
style: making CEP more appealing (#11)
KamilTaras Nov 21, 2024
04452dc
feature: Add Nose Obstacle Distance Measurements (#12)
Andrzej-Krzyskow Nov 21, 2024
e1d815d
fix: Increase Led Shoot Time
Andrzej-Krzyskow Nov 23, 2024
14abaae
feature: Improved buttons design (#13)
Andrzej-Krzyskow Nov 25, 2024
c45a6b3
feature: Add Major UI Changes (#14)
Andrzej-Krzyskow Nov 28, 2024
6ddc7e3
feature: statistics-on-the-screen (#15)
KamilTaras Nov 30, 2024
904d3f2
style: Add Minor UI Changes
Andrzej-Krzyskow Dec 1, 2024
5e592df
Merge remote-tracking branch 'origin/master' into object_detection_v2
Dec 6, 2024
c819f92
feat: slow but working object detection with deleted old code
Dec 6, 2024
cec0677
feat: added better model, button and logs for checking why detecting …
Dec 7, 2024
e425dda
feat: added more logs, better model speed
Dec 9, 2024
c320a56
feat: better bounding boxes, last errors corrected, deleted not used …
Dec 13, 2024
feat: added more logs, better model speed
zuzanna_slapek committed Dec 9, 2024
commit e425ddada001f6055b47f117679fff8104e5dbb8
4 changes: 2 additions & 2 deletions ZPI-App/app/build.gradle
@@ -128,6 +128,6 @@ dependencies {
implementation 'io.reactivex.rxjava2:rxjava:2.2.4'
implementation 'io.reactivex.rxjava2:rxandroid:2.1.0'

implementation 'org.tensorflow:tensorflow-lite:2.11.0'
implementation 'org.tensorflow:tensorflow-lite-support:0.4.0'
implementation 'org.tensorflow:tensorflow-lite:2.12.0'
implementation 'org.tensorflow:tensorflow-lite-support:0.4.3'
}
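
This bumps the bundled TensorFlow Lite runtime from 2.11.0 to 2.12.0 and the support library from 0.4.0 to 0.4.3. If it helps to confirm which runtime actually ships in the APK, a minimal sketch using the same TensorFlowLite.runtimeVersion() call the detection code below relies on (the class and method names here are illustrative, not from the PR):

import android.util.Log;

import org.tensorflow.lite.TensorFlowLite;

// Illustrative helper: log the TFLite runtime version shipped with the app.
// After this dependency bump it is expected to report 2.12.0, assuming no
// other artifact pins a different tensorflow-lite version.
final class TfliteVersionCheck {
    private TfliteVersionCheck() {}

    static void logRuntimeVersion() {
        Log.d("TensorFlowLite", "TFLite runtime version: " + TensorFlowLite.runtimeVersion());
    }
}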
2 changes: 1 addition & 1 deletion ZPI-App/app/src/main/assets/labels.txt
@@ -1 +1 @@
trunk
tree
@@ -3,6 +3,7 @@
import android.content.Context;
import android.content.res.AssetFileDescriptor;
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.os.Handler;
import android.util.Log;
import android.view.LayoutInflater;
@@ -24,6 +25,7 @@
import java.io.IOException;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.Queue;
import java.util.Random;
@@ -41,6 +43,7 @@
import dji.sdk.sdkmanager.DJISDKManager;

import org.tensorflow.lite.Interpreter;
import org.tensorflow.lite.TensorFlowLite;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
@@ -121,6 +124,10 @@ private void init(Context context) {
Log.d("ObjectDetection", "Attempting to load TensorFlow Lite model...");
tfliteInterpreter = new Interpreter(loadModelFile(context, "best_float32.tflite"));
Log.d("ObjectDetection", "TensorFlow Lite model loaded successfully.");
// Log model input and output shapes
Log.d("ObjectDetection", "Input shape: " + Arrays.toString(tfliteInterpreter.getInputTensor(0).shape()));
Log.d("ObjectDetection", "Output shape: " + Arrays.toString(tfliteInterpreter.getOutputTensor(0).shape()));

} catch (IOException e) {
e.printStackTrace();
Log.e("ObjectDetection", "Failed to load TensorFlow Lite model!", e);
@@ -487,56 +494,95 @@ private void stopObjectDetection() {

private void performObjectDetection() {
Log.d("ObjectDetection", "Starting object detection process...");
String tfliteVersion = TensorFlowLite.runtimeVersion();
Log.d("TensorFlowLite", "TensorFlow Lite Runtime Version: " + tfliteVersion);

if (videoFeedView.getBitmap() != null) {
Bitmap frameBitmap = videoFeedView.getBitmap();

Log.d("ObjectDetection", "Frame captured for processing: " +
frameBitmap.getWidth() + "x" + frameBitmap.getHeight());

float[] inputTensor = preprocessFrame(frameBitmap);
Log.d("ObjectDetection", "Input tensor prepared.");
try {
// Preprocess the frame
float[] inputTensor = preprocessFrame(frameBitmap);
Log.d("ObjectDetection", "Input tensor prepared. Shape: [1, 640, 640, 3]");
Log.d("ObjectDetection", "Input tensor size: " + inputTensor.length);
Log.d("ObjectDetection", "First 10 tensor values: " +
inputTensor[0] + ", " + inputTensor[1] + ", " + inputTensor[2] + "...");

// Initialize output tensor matching the model's output shape
float[][][] outputTensor = new float[1][5][8400];

// Allocate tensors and run inference
try {
tfliteInterpreter.allocateTensors(); // Ensure tensors are allocated
Log.d("ObjectDetection", "Tensors allocated successfully.");
tfliteInterpreter.run(inputTensor, outputTensor);
Log.d("ObjectDetection", "Inference completed.");
} catch (Exception e) {
Log.e("ObjectDetection", "Error during inference: ", e);
return;
}

float[][] outputScores = new float[1][1];
float[][] outputBoxes = new float[1][4];
// Process the model output
processOutput(outputTensor);

} catch (Exception e) {
Log.e("ObjectDetection", "Error during preprocessing or detection: ", e);
}
} else {
Log.w("ObjectDetection", "No frame available for processing.");
}
}

tfliteInterpreter.run(inputTensor, new Object[]{outputBoxes, outputScores});
Log.d("ObjectDetection", "Inference completed. Score: " + outputScores[0][0]);
private void processOutput(float[][][] outputTensor) {
Log.d("ObjectDetection", "Processing output tensor...");

if (outputScores[0][0] > 0.1) { // Confidence threshold
Log.d("ObjectDetection", "Object detected with confidence: " + outputScores[0][0]);
// Iterate through detections
for (int i = 0; i < outputTensor[0][4].length; i++) {
float confidence = outputTensor[0][4][i];

float left = outputBoxes[0][0] * frameBitmap.getWidth();
float top = outputBoxes[0][1] * frameBitmap.getHeight();
float right = outputBoxes[0][2] * frameBitmap.getWidth();
float bottom = outputBoxes[0][3] * frameBitmap.getHeight();
if (confidence > 0.2) { // Adjust confidence threshold as needed
float xCenter = outputTensor[0][0][i];
float yCenter = outputTensor[0][1][i];
float width = outputTensor[0][2][i];
float height = outputTensor[0][3][i];

Log.d("ObjectDetection", "Detection " + i + ": Confidence=" + confidence);

// Convert YOLO format (x_center, y_center, width, height) to bounding box
float left = xCenter - width / 2;
float top = yCenter - height / 2;
float right = xCenter + width / 2;
float bottom = yCenter + height / 2;

Log.d("ObjectDetection", "Bounding box - Left: " + left + ", Top: " + top +
", Right: " + right + ", Bottom: " + bottom);

updateDetectionOverlay(left, top, right, bottom, "tree");
} else {
Log.d("ObjectDetection", "No object detected or confidence below threshold.");
clearDetectionOverlay();
updateDetectionOverlay(left, top, right, bottom, "Detected Object");
}
} else {
Log.w("ObjectDetection", "No frame available for processing.");
}
}


private float[] preprocessFrame(Bitmap frameBitmap) {
int modelInputSize = 640;
Bitmap resizedBitmap = Bitmap.createScaledBitmap(frameBitmap, modelInputSize, modelInputSize, false);

// Resize the frame to 640x640
Bitmap resizedBitmap = Bitmap.createScaledBitmap(frameBitmap, modelInputSize, modelInputSize, true);

// Create a tensor for the input
float[] inputTensor = new float[modelInputSize * modelInputSize * 3];
int[] pixelValues = new int[modelInputSize * modelInputSize];
resizedBitmap.getPixels(pixelValues, 0, modelInputSize, 0, 0, modelInputSize, modelInputSize);

for (int i = 0; i < pixelValues.length; i++) {
int pixel = pixelValues[i];
inputTensor[i * 3] = ((pixel >> 16) & 0xFF) / 255.0f; // R
inputTensor[i * 3 + 1] = ((pixel >> 8) & 0xFF) / 255.0f; // G
inputTensor[i * 3 + 2] = (pixel & 0xFF) / 255.0f; // B

// Normalize RGB values to [0, 1]
inputTensor[i * 3] = ((pixel >> 16) & 0xFF) / 255.0f; // Red
inputTensor[i * 3 + 1] = ((pixel >> 8) & 0xFF) / 255.0f; // Green
inputTensor[i * 3 + 2] = (pixel & 0xFF) / 255.0f; // Blue
}
return inputTensor;
}
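
The new processOutput pass reads the [1][5][8400] tensor as YOLO-style columns: rows 0 to 3 hold x_center, y_center, width, and height, row 4 holds the confidence for the single "tree" class from labels.txt, and every column above the 0.2 threshold is converted to corner coordinates for the overlay. A compact sketch of that decode step under those assumptions (the helper name and threshold parameter are illustrative):

// Illustrative decode of a YOLO-style output tensor shaped [1][5][8400]:
// output[0][0..3][i] = x_center, y_center, width, height; output[0][4][i] = confidence.
static void decodeDetections(float[][][] output, float confidenceThreshold) {
    int numCandidates = output[0][4].length; // 8400 candidate boxes
    for (int i = 0; i < numCandidates; i++) {
        float confidence = output[0][4][i];
        if (confidence <= confidenceThreshold) {
            continue;
        }
        float xCenter = output[0][0][i];
        float yCenter = output[0][1][i];
        float width = output[0][2][i];
        float height = output[0][3][i];

        // Convert center/size form to corner coordinates, as processOutput does.
        float left = xCenter - width / 2f;
        float top = yCenter - height / 2f;
        float right = xCenter + width / 2f;
        float bottom = yCenter + height / 2f;

        android.util.Log.d("ObjectDetection",
                "Detection " + i + ": conf=" + confidence
                        + " box=[" + left + ", " + top + ", " + right + ", " + bottom + "]");
    }
}

In the PR, each surviving box is forwarded to updateDetectionOverlay(left, top, right, bottom, "tree"); unlike the removed code path, no rescaling to the video frame size happens in this hunk.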
@@ -557,5 +603,4 @@ private MappedByteBuffer loadModelFile(Context context, String modelPath) throws
long declaredLength = fileDescriptor.getDeclaredLength();
return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
}

}
}
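
Taken together, the changed methods amount to: resize the video frame to 640x640, normalize RGB to [0, 1], run the interpreter, and decode the [1][5][8400] output. A consolidated sketch under the same assumptions, with the input fed as a channels-last [1][640][640][3] array (the PR itself builds a flat float[]; all names below are illustrative):

import android.graphics.Bitmap;

import org.tensorflow.lite.Interpreter;

// Illustrative sketch: preprocess a frame into a [1][640][640][3] tensor,
// run inference, and return the raw [1][5][8400] output for decoding.
final class TreeDetector {
    private static final int INPUT_SIZE = 640;

    private final Interpreter interpreter;

    TreeDetector(Interpreter interpreter) {
        this.interpreter = interpreter;
    }

    float[][][] detect(Bitmap frame) {
        Bitmap resized = Bitmap.createScaledBitmap(frame, INPUT_SIZE, INPUT_SIZE, true);
        int[] pixels = new int[INPUT_SIZE * INPUT_SIZE];
        resized.getPixels(pixels, 0, INPUT_SIZE, 0, 0, INPUT_SIZE, INPUT_SIZE);

        float[][][][] input = new float[1][INPUT_SIZE][INPUT_SIZE][3];
        for (int y = 0; y < INPUT_SIZE; y++) {
            for (int x = 0; x < INPUT_SIZE; x++) {
                int pixel = pixels[y * INPUT_SIZE + x];
                input[0][y][x][0] = ((pixel >> 16) & 0xFF) / 255.0f; // R
                input[0][y][x][1] = ((pixel >> 8) & 0xFF) / 255.0f;  // G
                input[0][y][x][2] = (pixel & 0xFF) / 255.0f;         // B
            }
        }

        float[][][] output = new float[1][5][8400];
        interpreter.run(input, output);
        return output;
    }
}

With the earlier illustrative helpers, decodeDetections(detector.detect(bitmap), 0.2f) would reproduce the per-detection logging shown in this commit.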