# detect.py
from ultralytics import YOLO
import cv2
from PIL import Image
import io
import numpy as np


def _detect_with_model(model_path, image_bytes):
    """Run a YOLO model on raw image bytes and return (annotated JPEG bytes, detected class names)."""
    # Load the YOLO model weights; replace the path with your own model file
    model = YOLO(model_path)

    # Decode bytes into a PIL Image and force RGB (handles RGBA and greyscale inputs)
    image = Image.open(io.BytesIO(image_bytes)).convert("RGB")

    # Convert the PIL Image to OpenCV's BGR format
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)

    # Perform detection
    results = model(image)

    # Annotate the image with bounding boxes
    annotated_img = results[0].plot()

    # Map detected class indices to their category names
    categories = results[0].names
    detected_classes = results[0].boxes.cls
    detected_categories = [categories[int(cls)] for cls in detected_classes]

    # Encode the annotated image as JPEG bytes
    _, buffer = cv2.imencode('.jpg', annotated_img)
    img_bytes = buffer.tobytes()

    # Join the detected category names into a single string
    categories_str = ', '.join(detected_categories)
    return img_bytes, categories_str


def detect_objects(image_bytes):
    # First model: sheep detection weights
    return _detect_with_model("best_yolov9_weights.pt", image_bytes)


def detect_objects_model2(image_bytes):
    # Second model: teeth detection weights
    return _detect_with_model("teeth.pt", image_bytes)


def detect_objects_model3(image_bytes):
    # Third model: health detection weights
    return _detect_with_model("healthy.pt", image_bytes)
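

# Minimal usage sketch (assumptions: the weight files referenced above sit next to
# this script, and "sheep.jpg" is a hypothetical sample image path). It runs the
# sheep detector on an image from disk and saves the annotated result.
if __name__ == "__main__":
    with open("sheep.jpg", "rb") as f:  # hypothetical sample image
        annotated_bytes, labels = detect_objects(f.read())
    with open("sheep_annotated.jpg", "wb") as f:
        f.write(annotated_bytes)
    print("Detected:", labels)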