init add cnn #2

Open
wants to merge 2 commits into base: feature/shao/add_a_star
79 changes: 72 additions & 7 deletions gym-duckietown/generate_motions.py
@@ -1,9 +1,9 @@
import argparse
from ctypes import alignment
from genericpath import getctime
from logging import FATAL
from logging import FATAL, NullHandler
from math import nan
from os import curdir, spawnlpe, strerror
from os import curdir, strerror
# from hybrid_planner import *
# from motion_planner import *
import cv2
@@ -12,9 +12,16 @@
from gym_duckietown.simulator import *
import pyglet
import time
import torch
from PIL import Image
from nnModel import predict

# declare the arguments
parser = argparse.ArgumentParser()

# python generate_motions.py --max_steps 5000 --map-name map3_0 --seed 0 --start-tile 5,7 --goal-tile 2,2


# Do not change this
parser.add_argument('--max_steps', type=int, default=5000, help='max_steps')

@@ -28,8 +35,8 @@
goals = {"map1_0":{"seed":[1],"start":[0,1],"goal":[5,1],"path":[[0,1],[1,1],[2,1],[3,1],[4,1],[5,1]]},"map1_1":{"seed":[0],"start":[0,1],"goal":[70,1],"path":[[0,1],[1,1],[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],[8,1],[9,1],[10,1],[11,1],[12,1],[13,1],[14,1],[15,1],[16,1],[17,1],[18,1],[19,1],[20,1],[21,1],[22,1],[23,1],[24,1],[25,1],[26,1],[27,1],[28,1],[29,1],[30,1],[31,1],[32,1],[33,1],[34,1],[35,1],[36,1],[37,1],[38,1],[39,1],[40,1],[41,1],[42,1],[43,1],[44,1],[45,1],[46,1],[47,1],[48,1],[49,1],[50,1],[51,1],[52,1],[53,1],[54,1],[55,1],[56,1],[57,1],[58,1],[59,1],[60,1],[61,1],[62,1],[63,1],[64,1],[65,1],[66,1],[67,1],[68,1],[69,1],[70,1]]},"map1_2":{"seed":[2],"start":[2,1],"goal":[21,1],"path":[[2,1],[3,1],[4,1],[5,1],[6,1],[7,1],[8,1],[9,1],[10,1],[11,1],[12,1],[13,1],[14,1],[15,1],[16,1],[17,1],[18,1],[19,1],[20,1],[21,1]]},"map1_3":{"seed":[6],"start":[5,1],"goal":[65,1],"path":[[5,1],[6,1],[7,1],[8,1],[9,1],[10,1],[11,1],[12,1],[13,1],[14,1],[15,1],[16,1],[17,1],[18,1],[19,1],[20,1],[21,1],[22,1],[23,1],[24,1],[25,1],[26,1],[27,1],[28,1],[29,1],[30,1],[31,1],[32,1],[33,1],[34,1],[35,1],[36,1],[37,1],[38,1],[39,1],[40,1],[41,1],[42,1],[43,1],[44,1],[45,1],[46,1],[47,1],[48,1],[49,1],[50,1],[51,1],[52,1],[53,1],[54,1],[55,1],[56,1],[57,1],[58,1],[59,1],[60,1],[61,1],[62,1],[63,1],[64,1],[65,1]]},"map1_4":{"seed":[5],"start":[50,1],"goal":[90,1],"path":[[50,1],[51,1],[52,1],[53,1],[54,1],[55,1],[56,1],[57,1],[58,1],[59,1],[60,1],[61,1],[62,1],[63,1],[64,1],[65,1],[66,1],[67,1],[68,1],[69,1],[70,1],[71,1],[72,1],[73,1],[74,1],[75,1],[76,1],[77,1],[78,1],[79,1],[80,1],[81,1],[82,1],[83,1],[84,1],[85,1],[86,1],[87,1],[88,1],[89,1],[90,1]]},"map2_0":{"seed":[1],"start":[7,7],"goal":[1,1],"path":[[7,7],[6,7],[5,7],[5,6],[5,5],[4,5],[3,5],[3,4],[3,3],[2,3],[1,3],[1,2],[1,1]]},"map2_1":{"seed":[2],"start":[3,6],"goal":[7,1],"path":[[3,6],[3,5],[3,4],[3,3],[4,3],[5,3],[5,2],[5,1],[6,1],[7,1]]},"map2_2":{"seed":[5],"start":[1,6],"goal":[3,4],"path":[[1,6],[1,5],[2,5],[3,5],[3,4]]},"map2_3":{"seed":[4],"start":[1,2],"goal":[5,4],"path":[[1,2],[1,3],[2,3],[3,3],[4,3],[5,3],[5,4]]},"map2_4":{"seed":[4],"start":[7,4],"goal":[4,7],"path":[[7,4],[7,5],[6,5],[5,5],[5,6],[5,7],[4,7]]},"map3_0":{"seed":[1],"start":[5,7],"goal":[2,2],"path":[[5,7],[5,6],[5,5],[5,4],[4,4],[3,4],[2,4],[2,3],[2,2]]},"map3_1":{"seed":[2],"start":[5,11],"goal":[1,7],"path":[[5,11],[5,10],[5,9],[5,8],[4,8],[3,8],[2,8],[1,8],[1,7]]},"map3_2":{"seed":[3],"start":[10,5],"goal":[7,11],"path":[[10,5],[11,5],[11,6],[11,7],[10,7],[10,8],[10,9],[9,9],[8,9],[8,10],[8,11],[7,11]]},"map3_3":{"seed":[4],"start":[2,4],"goal":[9,1],"path":[[2,4],[3,4],[4,4],[5,4],[6,4],[7,4],[7,3],[7,2],[8,2],[8,1],[9,1]]},"map3_4":{"seed":[12],"start":[5,5],"goal":[10,11],"path":[[5,5],[5,6],[5,7],[5,8],[5,9],[5,10],[5,11],[6,11],[7,11],[8,11],[9,11],[10,11]]},"map4_0":{"seed":[4],"start":[10,4],"goal":[3,3],"path":[[10,4],[9,4],[9,5],[8,5],[8,6],[8,7],[7,7],[6,7],[6,6],[6,5],[5,5],[5,4],[5,3],[4,3],[3,3]]},"map4_1":{"seed":[4],"start":[7,7],"goal":[1,12],"path":[[7,7],[6,7],[5,7],[4,7],[3,7],[3,8],[3,9],[3,10],[3,11],[2,11],[1,11],[1,12]]},"map4_2":{"seed":[4],"start":[4,1],"goal":[11,11],"path":[[4,1],[3,1],[3,2],[3,3],[4,3],[5,3],[5,4],[5,5],[6,5],[6,6],[6,7],[6,8],[6,9],[5,9],[5,10],[5,11],[6,11],[7,11],[7,10],[8,10],[9,10],[10,10],[10,11],[11,11]]},"map4_3":{"seed":[6],"start":[1,8],"goal":[13,8],"path":[[1,8],[1,7],[2,7],[3,7],[4,7],[5,7],[6,7],[7,7],[8,7],[8,6],[8,5],[9,5],[9,4],[10,4],[11,4],[12,4],[12,5],[12,6],[13,6],[13,7],[13,8]]},"map4_4":{"seed
":[8],"start":[5,10],"goal":[11,4],"path":[[5,10],[5,9],[6,9],[6,8],[6,7],[7,7],[8,7],[8,6],[8,5],[9,5],[9,4],[10,4],[11,4]]},"map5_0":{"seed":[0],"start":[10,4],"goal":[2,9],"path":[[10,4],[11,4],[11,5],[11,6],[11,7],[10,7],[9,7],[8,7],[8,6],[7,6],[7,5],[6,5],[5,5],[4,5],[3,5],[2,5],[1,5],[1,6],[1,7],[1,8],[2,8],[2,9]]},"map5_1":{"seed":[0],"start":[6,8],"goal":[4,13],"path":[[6,8],[7,8],[7,9],[8,9],[8,10],[8,11],[8,12],[8,13],[7,13],[6,13],[5,13],[4,13]]},"map5_2":{"seed":[2],"start":[10,7],"goal":[10,1],"path":[[10,7],[11,7],[11,6],[11,5],[11,4],[10,4],[9,4],[8,4],[8,3],[8,2],[9,2],[10,2],[10,1]]},"map5_3":{"seed":[4],"start":[1,6],"goal":[12,15],"path":[[1,6],[1,7],[1,8],[2,8],[2,9],[3,9],[3,10],[4,10],[4,11],[5,11],[6,11],[6,12],[6,13],[7,13],[8,13],[9,13],[10,13],[11,13],[12,13],[12,14],[12,15]]},"map5_4":{"seed":[5],"start":[3,10],"goal":[15,9],"path":[[3,10],[4,10],[4,11],[5,11],[6,11],[6,12],[6,13],[7,13],[8,13],[9,13],[10,13],[11,13],[12,13],[12,12],[12,11],[13,11],[14,11],[15,11],[15,10],[15,9]]}}

###########
index = 'map3_0'
###########
index = 'map3_3'
###########

seed = goals[index]['seed'][0]
start = goals[index]['start']
@@ -59,10 +66,12 @@
total_reward = 0
dts = np.array([], np.int32)


rewardList = []
predicted_pos = start_pos
trig_target = None

obs = []

def generate_action(robot_theta, target_theta):
if target_theta == -1:
return 0
@@ -180,6 +189,7 @@ def get_vw(m, t):
break
actions.append([speed, steering])
obs, reward, done, info = env.step([speed, steering])
rewardList.append(reward)
initial_reward += reward
env.render()
print(reward)
@@ -236,6 +246,7 @@ def get_vw(m, t):
print(predicted_pos, trig_target)
actions.append([speed, steering])
obs, reward, done, info = env.step([speed, steering])
rewardList.append(reward)
total_reward += reward
d = [int(env.cur_pos[0] * 50), int(env.cur_pos[2] * 50)]
dts = np.append(dts,d)
@@ -248,9 +259,63 @@ def get_vw(m, t):
if done or (goal == [math.floor(env.cur_pos[0]), math.floor(env.cur_pos[2])]):
break

count = 0
cam_angle = env.unwrapped.cam_angle
cam_angle[0] = 10

while True:
time.sleep(1)
move = False
im = Image.fromarray(obs)
prediction = predict(im) # change
count += 1
im.save('../result/images/%s.png' % (count))
print(count, prediction)
if prediction == 2:
speed = 1
steering = 0
move = True

elif prediction == 1:
speed = 1
steering = 8
move = True

elif prediction == 3:
speed = 1
steering = -8
move = True

elif prediction == 4:
cam_angle = env.unwrapped.cam_angle
cam_angle[0] -= 5

elif prediction == 0:
break

if move == True:
actions.append([speed, steering])
obs, reward, done, info = env.step([speed, steering])
rewardList.append(reward)
total_reward += reward
d = [int(env.cur_pos[0] * 50), int(env.cur_pos[2] * 50)]
dts = np.append(dts,d)
dts = dts.reshape((-1,1,2))
map_img = cv2.polylines(map_img,[dts],False,(0,0,255), thickness=3)
cv2.imshow("map", map_img)
cv2.waitKey(10)
print(reward)

env.render()
# INTENTION_MAPPING = {'front': 2, 'left': 1, 'right': 3,'up': 4, 'stop': 0 }




predicted_pos = [math.floor(env.cur_pos[0]), math.floor(env.cur_pos[2])]
np.savetxt(f'/home/marshall/Desktop/duckietown/{index}_seed{seed}_start_{start[0]},{start[1]}_goal_{end[0]},{end[1]}.txt',
np.savetxt(f'../result/{index}_seed{seed}_start_{start[0]},{start[1]}_goal_{end[0]},{end[1]}.txt',
actions, delimiter=',')

np.savetxt(f'../result/{index}_seed{seed}_start_{start[0]},{start[1]}_goal_{end[0]},{end[1]}_reward.txt',
rewardList, delimiter=',')
print("done", predicted_pos, total_reward, "initial", initial_reward)
pyglet.app.run()
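
For reference, the prediction-driven loop added above maps each predicted intention to a fixed (speed, steering) pair. The snippet below is an illustrative sketch only, not part of the diff; it assumes the same speed/steering values used in the loop and the mapping from the INTENTION_MAPPING comment ({'front': 2, 'left': 1, 'right': 3, 'up': 4, 'stop': 0}).

# Illustrative sketch (assumption, not part of the diff): the intention-to-action
# dispatch from the loop above expressed as a lookup table.
DRIVE_ACTIONS = {
    2: (1, 0),   # front: drive straight
    1: (1, 8),   # left: steer left
    3: (1, -8),  # right: steer right
}

def dispatch(prediction, cam_angle):
    """Return a (speed, steering) pair for driving intentions, tilt the camera
    for 'up' (4), and return None for 'stop' (0) or anything unrecognised."""
    if prediction in DRIVE_ACTIONS:
        return DRIVE_ACTIONS[prediction]
    if prediction == 4:
        cam_angle[0] -= 5   # look further down; no motion on this step
    return None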
93 changes: 93 additions & 0 deletions gym-duckietown/nnModel.py
@@ -0,0 +1,93 @@
import os
import cv2
import random
import math
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import Dataset
from torch.utils.data import Sampler
from torchvision import transforms
from torchvision.transforms import *
import sys
import torch.nn as nn
import torch.nn.functional as F


# class Flatten(nn.Module):
# def forward(self, x):
# return x.view(x.size(0), -1)

class MyModel(nn.Module):

def __init__(self, num_bins=5):
super().__init__()
self.num_bins = num_bins

# Build the CNN feature extractor
self.cnn = nn.Sequential(

nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=1, padding=0),
nn.MaxPool2d((2, 2)),
nn.ReLU(inplace=True),

nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),

nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),

nn.AdaptiveMaxPool2d(output_size=(1, 1))
)

# Build an FC head that maps the image features to the intention bins
self.fc = nn.Sequential(
nn.Linear(in_features=128, out_features=32),
nn.Linear(in_features=32, out_features=num_bins))



def forward(self, x):
x = self.cnn(x)
x = x.view(x.size(0), -1)

return x


def read_image(path):
return Image.open(path)

def predict(im):
my_model = MyModel()
my_model = nn.DataParallel(my_model.cuda().float())
my_model.eval()

my_model.load_state_dict(torch.load('../model/model_update.pt'))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
my_model = my_model.to(device)


preprocessor = Compose([
Resize((160, 120)),
ToTensor()
])

image = preprocessor(im)[None, ...]

with torch.no_grad():
prediction = my_model(image)
_,score = torch.max(prediction, 1)
return score.item()

# for i, image in enumerate(test_data.__getitem__()[None, ...]):
# image = image.cuda()
# with torch.no_grad():
# prediction = my_model(image)
# print(prediction)

if __name__ == "__main__":
for i in range(1,25):
img = read_image('../result/images/{}.png'.format(i))
print('test predict function : '+str(predict(img)))
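
Two things in nnModel.py that a follow-up commit might address: predict() rebuilds MyModel and reloads the checkpoint on every call, and forward() returns the flattened 128-dimensional CNN features without applying self.fc, so torch.max over the output indexes feature positions rather than the num_bins intention classes. The sketch below is a suggestion only; it assumes that applying the FC head is the intended behaviour and keeps the checkpoint path used in the diff.

import torch
import torch.nn as nn
from nnModel import MyModel  # the model defined in the diff above

_MODEL = None  # loaded once and reused by every predict() call

def load_model(checkpoint='../model/model_update.pt'):
    """Build and load the network a single time instead of once per frame."""
    global _MODEL
    if _MODEL is None:
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # Same DataParallel wrapping that the diff uses when loading the state dict
        model = nn.DataParallel(MyModel().float()).to(device)
        model.load_state_dict(torch.load(checkpoint, map_location=device))
        model.eval()
        _MODEL = model
    return _MODEL

# Forward pass that also applies the FC head declared in __init__, so the output
# is (N, num_bins) class logits rather than (N, 128) raw features:
#
#     def forward(self, x):
#         x = self.cnn(x)               # (N, 128, 1, 1) after adaptive max-pooling
#         x = x.view(x.size(0), -1)     # flatten to (N, 128)
#         return self.fc(x)             # (N, num_bins) logits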
119 changes: 119 additions & 0 deletions gym-duckietown/nnModel.py.bk
@@ -0,0 +1,119 @@
import os
import cv2
import random
import math
import pandas as pd
from PIL import Image
import torch
from torch.utils.data import Dataset
from torch.utils.data import Sampler
from torchvision import transforms
from torchvision.transforms import *
import sys
import torch.nn as nn
import torch.nn.functional as F


class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)

class MyModel(nn.Module):

def __init__(self, num_bins=6):
super().__init__()
self.num_bins = num_bins

# Build the CNN feature extractor
self.cnn = nn.Sequential(

nn.Conv2d(in_channels=3, out_channels=32, kernel_size=3, stride=2, padding=0),
nn.ReLU(inplace=True),

nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),

nn.Conv2d(in_channels=64, out_channels=192, kernel_size=3, stride=2, padding=1),
nn.ReLU(inplace=True),

nn.Dropout(p=0.05),

nn.AdaptiveMaxPool2d(output_size=(1, 1))
)

# Build an FC head, taking both the image features and the intention as input
self.fc = nn.Sequential(
nn.Linear(in_features=192+3, out_features=32),
nn.Linear(in_features=32, out_features=num_bins))


def forward(self, x):
x = self.cnn(x)
x = x.view(x.size(0), -1)
return x





class MyData(Dataset):

INTENTION_MAPPING = {'front': 5, 'Hleft': 4, 'Hright': 6,'up': 8, 'down': 2, 'stop': 0 }


def __init__(self, img):

self.preprocess = Compose([
Resize((160, 160)),
ToTensor(),
Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761])
])
self.image_binary = img

def __getitem__(self):
image = self.preprocess(self.image_binary) # change
return image

def __len__(self):
return len(self.data)

def read_image(path):
return Image.open(path)

def predict(im):
my_model = MyModel()
my_model = nn.DataParallel(my_model.cuda().float())
my_model.eval()

my_model.load_state_dict(torch.load('../model/model_update.pt'))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
my_model = my_model.to(device)


preprocessor = Compose([
Resize((160, 160)),
ToTensor(),
Normalize(mean=[0.5071, 0.4866, 0.4409], std=[0.2675, 0.2565, 0.2761])
])

image = preprocessor(im)[None, ...]
# print(image2.cuda().shape)
# test_data = MyData(im)
# image = test_data.__getitem__()[None, ...]
# image = image.cuda()
# print(image.shape)
with torch.no_grad():
prediction = my_model(image)
print(prediction)
score = torch.argmax(prediction, dim=1).item()
return score

# for i, image in enumerate(test_data.__getitem__()[None, ...]):
# image = image.cuda()
# with torch.no_grad():
# prediction = my_model(image)
# print(prediction)

if __name__ == "__main__":
img = read_image('../test_20211118-022828/map2_4.jpg')
print('test predict function'+str(predict(img)))
Binary file added model/model_update.pt
Binary file not shown.