From 9e07412f45747731fcc601173abfa7e61a4f69f8 Mon Sep 17 00:00:00 2001
From: Michael Lee
Date: Wed, 28 Apr 2021 00:16:29 -0700
Subject: [PATCH] working hardcode controller. Behavior wrinkles (especially when centering)

---
 .vscode/settings.json          |   2 +-
 main.py                        |  45 +++++++++---
 network/HardCode_Controller.py | 124 ++++++++++++++++++++-------------
 requirements.txt               |   3 +-
 4 files changed, 114 insertions(+), 60 deletions(-)

diff --git a/.vscode/settings.json b/.vscode/settings.json
index af32558..2a63927 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -1,3 +1,3 @@
 {
-    "python.pythonPath": "C:\\Users\\swimm\\Anaconda3\\envs\\Pagent\\python.exe"
+    "python.pythonPath": "C:\\Users\\swimm\\Anaconda3\\envs\\pagent_clone\\python.exe"
 }
\ No newline at end of file
diff --git a/main.py b/main.py
index 42d6b14..f8fc71a 100644
--- a/main.py
+++ b/main.py
@@ -1,25 +1,52 @@
 import airsim
 import numpy as np
+from torchvision import transforms
+import torch
+import PIL
+import os
+import io
 from Network.HardCode_Controller import HardCode_Controller
 
 # Check if Connection with Airsim is Good
 client = airsim.MultirotorClient()
-client = airsim.MultirotorClient()
 client.confirmConnection()
 client.enableApiControl(True)
 client.armDisarm(True)
 
+# Load CNN model (assumes it is stored in \P-agent\Network\object_detection)
+device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+loaded_model = torch.load(os.path.join(os.path.abspath(__name__), "..", "Network", "object_detection", "mymodel3.pt"), map_location=device)
+
 #Takeoff and Prepare for instructions
 client.takeoffAsync().join()
 done = False
-mem = np.zeros([3,1])
-
+mem = 0
 while(done == False):
-    # pts, confidence = CNN(client.simGetImages) # Where pts = [bottomleft, topleft, topright, bottom right] of bounding box
-    pts = []
-    # TODO: Confidence Threshold Check
-    done, mem = HardCode_Controller(client, pts, mem) # Probably have to call a method after def.
-    done.policy()
-print("I got to the Package!")
\ No newline at end of file
+    # Grab an image and convert it to an RGB tensor
+    img = client.simGetImage("0", airsim.ImageType.Scene)
+    f_im = PIL.Image.open(io.BytesIO(img)).convert('RGB')
+    f_tensor = transforms.ToTensor()(f_im)
+
+    # Feed the image into the CNN and collect the prediction
+    mask_pred = loaded_model([f_tensor.to(device)])
+    if len(mask_pred[0]['boxes']) == 0:  # Empty tensor, i.e. no package detected
+        box_pred = []
+    else:
+        box_pred = mask_pred[0]['boxes'][0]  # First tensor holds the bounding box coordinates
+        confidence = mask_pred[0]['scores'][0]
+        x0, y0, x1, y1 = [int(i) for i in box_pred.tolist()]  # Turn into pixel coordinates
+        box_pred = np.array([[x0, y0], [x1, y1]])  # First pair is the top left of the box, second pair is the bottom right
+
+        if confidence < 0.9:  # Confidence threshold check
+            box_pred = []
+
+    Controller = HardCode_Controller(client, box_pred, mem)
+    done, mem, collided = Controller.policy()
+
+if collided.has_collided:
+    print("I hit a wall, dumdum")
+else:
+    print("I got to the Package!")
\ No newline at end of file
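A note on the detection step above: main.py only needs the top-scoring box, and the confidence guard should only run when a box actually exists. A small helper keeps that guard in one place. The sketch below is illustrative and not part of the patch; the helper name and the 0.9 threshold are assumptions, but the input format matches what torchvision detection models return (a list containing a dict of 'boxes' and 'scores', already sorted by descending score).

import numpy as np

def top_box(prediction, score_thresh=0.9):
    """Return [[x0, y0], [x1, y1]] for the best detection, or [] if nothing clears the threshold."""
    boxes = prediction[0]['boxes']          # torchvision detections come sorted by descending score
    scores = prediction[0]['scores']
    if len(boxes) == 0 or scores[0] < score_thresh:
        return []                           # nothing worth chasing this frame
    x0, y0, x1, y1 = [int(v) for v in boxes[0].tolist()]
    return np.array([[x0, y0], [x1, y1]])   # top-left and bottom-right corners

Used in the loop above, this would collapse the detection branch to box_pred = top_box(mask_pred).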
diff --git a/network/HardCode_Controller.py b/network/HardCode_Controller.py
index 3db5716..48510b2 100644
--- a/network/HardCode_Controller.py
+++ b/network/HardCode_Controller.py
@@ -1,16 +1,5 @@
 import numpy as np
-
-# Possibility one: Given these outputs
-# class HardCode_Controller(client, im_bool, rel_position, rel_pich, rel_yaw, rel_roll):
-#     """
-#     client: airsim controller object
-#     im_bool: true if image is in frame
-#     rel_position: relative position to box (vector)
-#     relative orientation
-#     rel_pitch
-#     rel_yaw
-#     rel_roll
-#     """
+import airsim
 
 class HardCode_Controller:
     """
@@ -28,58 +17,95 @@ class HardCode_Controller:
        Takes action in Unreal Environment
     """
 
-    def __init__(self, client, boundingBox_pts):
+    def __init__(self, client, boundingBox_pts, mem):
         self.client = client
         self.pts = boundingBox_pts
         self.done = False
-        self.mem = np.zeros([3,1])
-        self.image_Coords = [[0,0], [0,144], [256,144], [256,0]]
-
+        self.mem = mem
+        self.image_Coords = np.array([[0,144],[256,0]])
 
-    def center(self, pitch_rate = np.pi/2, yaw_rate = np.pi/2, z = 0, duration = 1):
+    def center(self, in_frame, z, dir = 1, duration = 0.2):
         """
         Specify protocols for getting package centered in camera view. With a Field of view of 90 deg.
-        As it is now, drone looks down. Checks. Drone looks up. Checks. Drone straightens and pivots to right. Checks and Loops to beginning
+        As it is now: the drone does a 360 deg sweep and, if no package is found, moves to a lower level and checks again.
+        dir is the direction to turn when fine tuning (1 = clockwise, -1 = counter clockwise).
         """
-        # TODO: Replace
-        if self.mem[:-3]:
-            self.client.moveByAngleRatesZAsync(0, pitch_rate, 0, 0, duration)
-            self.mem = np.zeros([3,1]) # Reset to 0
-        elif self.mem[:-2]:
-            self.client.moveByAngleRatesZAsync(0, -2 * pitch_rate, 0, 0, duration)
-        elif self.mem[:-1]:
-            self.client.moveByAngleRatesZAsync(0, pitch_rate, yaw_rate, 0, duration)
-
-    def area(self):
+
+        # If the package isn't in frame, rotate and try to get it in frame
+        if not in_frame:
+            # Rotate one step and capture (90 deg/s for 0.2 s, roughly 18 deg per step)
+            self.client.moveByVelocityZAsync(0, 0, z, duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(True, 90)).join()
+            self.mem += 1
+
+            # Lower the drone based on how many sweep steps it has done
+            if self.mem % 18 == 0:
+                self.client.moveByVelocityZAsync(0, 0, z + 1, duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, 0)).join()  # Move down one unit (NED: +z is down)
+                self.mem = 0
+
+        # If the package is in frame, do small turns until it's centered
+        else:
+            # TODO: If the distance of the max is less towards one side, move in that direction. ISSUE WITH THIS!
+            self.client.moveByVelocityZAsync(0, 0, 0, duration, airsim.DrivetrainType.MaxDegreeOfFreedom, airsim.YawMode(False, dir * 20)).join()
+
+    # def move():
+        # TODO: move function will just move the drone, correct for that movement in the orientation of the camera, and then use the camera orientation to direct itself.
+
+    def area(self, coords):
         """
-        Calculate the area of the bounding box
+        Calculate the area of the bounding box given a (2,2) Numpy array
         """
-        area = (self.pts[0]-self.pts[1])*(self.pts[1]-self.pts[2])
+        area = (coords[1,0] - coords[0,0]) * (coords[1,1] - coords[0,1])
         return area
-
-
-    def policy(self, center_thresh = 50, velocity = 3):
-
-        # Subtract the points and determine if the center is within
-        bb_subtract = self.boundingBox_pts - self.image_Coords
-        # Do this really janky check
-        if 0 > bb_subtract[1][1] and 0 > bb_subtract[2][1] and 0 > bb_subtract[2][0] and 0 > bb_subtract[2][1] and bb_subtract[3][0] \
-            and 0 < bb_subtract[0][0] and 0 < bb_subtract[0][1] and 0 < bb_subtract[1][0] and 0 < bb_subtract[3][1]:
-            centered_box = True
-        else:
+
+    def overlap(self, rect, ratio = .4):
+        # Create an "inner box" as a threshold
+        thresh = np.subtract([self.image_Coords[:,0]], ratio * self.image_Coords[1,0])
+        thresh = np.hstack((thresh, np.subtract([self.image_Coords[:,1]], ratio * self.image_Coords[0,1])))
+        thresh[0][0] = -thresh[0][0]
+        thresh[0][3] = -thresh[0][3]
+        thresh = np.reshape(thresh, (2,2))
+
+        # Compare the threshold box with the bounding box
+        intersect = np.sort(np.vstack((thresh, rect)), 0)
+        intersect = intersect[1:3,:]  # Pick out the middle coordinates
+        intersect[:,1] = np.flip(intersect[:,1])  # Swap the y coordinates (the larger one should be 0th)
+        int_area = self.area(intersect)
+        total_area = self.area(self.image_Coords)
+
+        return int_area/total_area
+
+    def policy(self, center_thresh = .7, velocity = 3):
+
+        # Grab State Information
+        state = self.client.simGetVehiclePose()
+
+        # Centering
+        if len(self.pts) == 0:
             centered_box = False
-
-        if not centered_box: # This is real janky but... I hope it works.
-            self.center()
-            np.append(self.mem, 1) # Yes, we did a search and we didn't find it
-            self.mem = self.mem[1:]
+            self.center(False, state.position.z_val)  # If you can't find the package at all
         else:
-            np.append(self.mem, 0) # Yes, we did a search and we found it
-            self.mem = self.mem[1:]
+            # Check overlap with a "threshold" box for the middle of the frame
+            if self.overlap(self.pts) > center_thresh:
+                centered_box = True
+            else:
+                centered_box = False
+                # Determine the direction to turn to fine tune.
+                if (self.image_Coords[1,1]/2 - self.pts[0,0]) > (self.pts[1,0] - self.image_Coords[1,1]/2):
+                    self.center(True, state.position.z_val, 1)
+                else:
+                    self.center(True, state.position.z_val, -1)
+
+        # Move
+        if centered_box:
+            pass  # TODO: move(rel_position, velocity) once a move step exists (see the note at the end of this patch)
 
+        # Check if the drone collided or landed
+        collided = self.client.simGetCollisionInfo()
+        # landed = self.client.getMultirotorState().landed_state
 
-        return self.done, self.mem
+        if collided.has_collided:
+            self.done = True
+        return self.done, self.mem, collided
 
     print('hello')
\ No newline at end of file
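For comparison with the overlap() check above: the same "is the box sitting inside a centered inner window" idea can be written with an explicit min/max intersection, which is easier to sanity-check against the 256x144 frame. The sketch below is illustrative and not part of the patch; the function name, the 0.4 window ratio, and normalising by the inner window (rather than by the whole frame) are assumptions.

def centered_fraction(box, img_w=256, img_h=144, ratio=0.4):
    """box = (x0, y0, x1, y1) in pixels, with y growing downward. Returns a value in [0, 1]."""
    # Inner window: the middle `ratio` of the image along each axis
    ix0, ix1 = img_w * (1 - ratio) / 2, img_w * (1 + ratio) / 2
    iy0, iy1 = img_h * (1 - ratio) / 2, img_h * (1 + ratio) / 2

    # Axis-aligned intersection of the detection box with the inner window
    ox0, oy0 = max(box[0], ix0), max(box[1], iy0)
    ox1, oy1 = min(box[2], ix1), min(box[3], iy1)
    inter = max(0.0, ox1 - ox0) * max(0.0, oy1 - oy0)

    inner_area = (ix1 - ix0) * (iy1 - iy0)
    return inter / inner_area  # 1.0 when the box fully covers the inner window

With this normalisation, a threshold such as 0.7 reads directly as "the box covers 70% of the inner window".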
diff --git a/requirements.txt b/requirements.txt
index 424c1e6..0c0f291 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -47,7 +47,7 @@ markdown=3.3.4=py38haa95532_0
 mkl=2020.2=256
 mkl-service=2.3.0=py38h196d8e1_0
 mkl_fft=1.3.0=py38h46781fe_0
-mkl_random=1.1.1=py38h47e9c7a_0
+mkl_random=1.1.1=py38h47e9c7a_0
 msgpack-python=0.5.6=pypi_0
 msgpack-rpc-python=0.4.1=pypi_0
 multidict=5.1.0=py38h2bbff1b_2
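On the move step that HardCode_Controller.policy() leaves as a TODO: one possible shape is to translate along the drone's current yaw while holding altitude, so the centering already done by center() keeps the package in view. This is a sketch under assumptions and not part of the patch; the function name, speed, and duration are made up, and it relies only on AirSim Python client calls used elsewhere in this patch plus airsim.to_eularian_angles.

import math
import airsim

def move_forward(client, velocity=3, duration=1):
    """Fly along the current yaw for `duration` seconds while holding altitude."""
    pose = client.simGetVehiclePose()
    _pitch, _roll, yaw = airsim.to_eularian_angles(pose.orientation)
    vx = velocity * math.cos(yaw)   # forward component in the world frame (NED)
    vy = velocity * math.sin(yaw)
    client.moveByVelocityZAsync(vx, vy, pose.position.z_val, duration,
                                airsim.DrivetrainType.MaxDegreeOfFreedom,
                                airsim.YawMode(True, 0)).join()  # yaw rate 0: keep facing the package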