-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathTask4_Testing
More file actions
778 lines (615 loc) · 34.7 KB
/
Task4_Testing
File metadata and controls
778 lines (615 loc) · 34.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
#!/usr/bin/env python3
import cv2 #for importing opencv
import numpy as np #useful in the array operations
import roslib #roslib is an internal library to write tools
import sys #module to handle runtime environment
import rospy #for use of python with ROS
from geometry_msgs.msg import Twist
from sensor_msgs.msg import Image #importing the image type sensor message
from sensor_msgs.msg import LaserScan
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion
from cv_bridge import CvBridge, CvBridgeError #for importing CvBridge used to convert the ROS format images to opencv format
import tf2_ros #imporing tf2
import geometry_msgs.msg #for geometry messages
import tf_conversions #importing tf_conversions
import moveit_commander #moveit commander for motion planning
import moveit_msgs.msg #moveit messages
import actionlib #for importing for providing standard interface e.g. for returning point cloud
import math #for importing the math library
######## COLOR THRESHOLDING AND FILTERING ########
'''
h_min ----> Min value of Hue
s_min ----> Min value of Saturation
v_min ----> Min value of Value
h_max ----> Max value of Hue
s_max ----> Max value of Saturation
v_max ----> Max value of Value
'''
# NOTE(review): h_min == h_max == 0 restricts the mask to hue exactly 0 (pure red in HSV);
# presumably intentional for ripe tomatoes -- confirm against the simulated camera output.
tomatoColor = [[0, 37, 117, 0, 250, 255]] #Defining the HSV color space of red tomatoes with format [h_min, s_min, v_min, h_max, s_max, v_max]
###################################################
### GLOBAL VARIABLE ####
imgContour = None #holds the annotated output frame drawn by getContours(); set each frame by callback()
depth_image = np.empty((480,640),float) #2D array holding the depth value (metres) of every pixel; filled by callback_depth()
midX = 0 #x pixel coordinate of the centre of the last detected contour
midY = 0 #y pixel coordinate of the centre of the last detected contour
linear_vel = 0.0 #forward velocity command published on /cmd_vel by main()
angular_vel = 0.0 #yaw velocity command published on /cmd_vel by main()
# Laser scan minima per angular sector; populated by laser_callback().
regions = {
'bright': 0.0 ,
'fright': 0.0 ,
'front': 0.0 ,
'fleft': 0.0 ,
'bleft': 0.0 ,
}
flag1 = True #stage control in main(): True -> send the arm to the Plant_Perception pose
flag2 = False #stage control in main(): True -> look for tomato/ArUco TFs and pick
ArucoId = -1 #ID (as string once detected) of the last ArUco marker seen by callback_Aruco()
# NOTE(review): initialised with 4 slots but odom_callback() overwrites it with a
# 3-element [x, y, yaw] list -- confirm nothing reads pose[3].
pose = [0.0] * 4 #initializing the pose variable
flagDetect = False #True while getContours() currently sees a large-enough tomato contour
flagExecution = True #set by Ur5Moveit.go_to_pose(): True when the last plan/execution succeeded
# Flag Variables to handle the stage of motion of the agribot
# (exactly one of flag3..flag16 is True at a time; each is one leg of the route
# driven by moveStraight()).
flag3 = True
flag4 = False
flag5 = False
flag6 = False
flag7 = False
flag8 = False
flag9 = False
flag10 = False
flag11 = False
flag12 = False
flag13 = False
flag14 = False
flag15 = False
flag16 = False
#######################
class Ur5Moveit:
    """Thin wrapper around MoveIt! for the UR5 arm and its gripper.

    Owns the "arm" and "gripper" planning groups and exposes helpers for
    moving to named poses (arm_robot), an arbitrary Cartesian pose
    (go_to_pose), and opening/closing the gripper (gripper_robot).
    """

    # Constructor
    def __init__(self):
        self._planning_group = "arm" #planning group for moving the arm
        self._commander = moveit_commander.roscpp_initialize(sys.argv) #initializing the moveit commander
        self._robot = moveit_commander.RobotCommander()
        self._scene = moveit_commander.PlanningSceneInterface()
        self._group = moveit_commander.MoveGroupCommander(self._planning_group) #setting the planning group
        self._hand_group = moveit_commander.MoveGroupCommander("gripper") #separate group so gripper open/close can be controlled independently of the arm
        self._display_trajectory_publisher = rospy.Publisher(
            '/move_group/display_planned_path', moveit_msgs.msg.DisplayTrajectory, queue_size=1)
        self._exectute_trajectory_client = actionlib.SimpleActionClient(
            'execute_trajectory', moveit_msgs.msg.ExecuteTrajectoryAction)
        self._exectute_trajectory_client.wait_for_server() #block until the trajectory-execution action server is up
        self._planning_frame = self._group.get_planning_frame()
        self._eef_link = self._group.get_end_effector_link()
        self._group_names = self._robot.get_group_names()
        self._box_name = ''
        # Current State of the Robot is needed to add box to planning scene
        self._curr_state = self._robot.get_current_state()
        rospy.loginfo(
            '\033[94m' + "Planning Group: {}".format(self._planning_frame) + '\033[0m')
        rospy.loginfo(
            '\033[94m' + "End Effector Link: {}".format(self._eef_link) + '\033[0m')
        rospy.loginfo(
            '\033[94m' + "Group Names: {}".format(self._group_names) + '\033[0m')
        rospy.loginfo('\033[94m' + " >>> Ur5Moveit init done." + '\033[0m')

    def gripper_robot(self,state):
        """Drive the gripper to a named pose: state 1 -> "close", state 0 -> "open"."""
        if state == 1: #state 1 means that the gripper will be in the closed state
            self._hand_group.set_named_target("close") #target is the pre-defined "close" pose of the gripper
            plan2 = self._hand_group.go() #go() both plans and executes
        elif state == 0: #state 0 means that the gripper will be in the open state
            self._hand_group.set_named_target("open")
            plan2 = self._hand_group.go()

    def arm_robot(self,state):
        """Move the arm to a predefined pose.

        state 1 -> "Drop_Pose" (drop a picked tomato into the bucket);
        state 0 -> "Plant_Perception" (scan the plant so tomato TFs can be collected).
        """
        if state == 1:
            self._group.set_named_target("Drop_Pose")
            plan3 = self._group.go() #go() both plans and executes
        elif state == 0:
            self._group.set_named_target("Plant_Perception")
            plan3 = self._group.go()

    def go_to_pose(self, arg_pose):
        """Plan and execute a move of the end effector to `arg_pose`.

        Logs current/final pose and joint values, sets the global
        flagExecution to the planning result, and returns MoveIt's
        boolean success flag.
        """
        global flagExecution
        pose_values = self._group.get_current_pose().pose #current pose, for logging
        rospy.loginfo('\033[94m' + ">>> Current Pose:" + '\033[0m')
        rospy.loginfo(pose_values)
        self._group.set_pose_target(arg_pose) #set the target pose of the arm
        flag_plan = self._group.go(wait=True) # wait=False for Async Move
        pose_values = self._group.get_current_pose().pose
        rospy.loginfo('\033[94m' + ">>> Final Pose:" + '\033[0m')
        rospy.loginfo(pose_values)
        list_joint_values = self._group.get_current_joint_values()
        rospy.loginfo('\033[94m' + ">>> Final Joint Values:" + '\033[0m')
        rospy.loginfo(list_joint_values)
        if (flag_plan == True): #report whether planning and execution succeeded
            flagExecution = True
            rospy.loginfo(
                '\033[94m' + ">>> go_to_pose() Success" + '\033[0m')
        else:
            flagExecution = False
            rospy.logerr(
                '\033[94m' + ">>> go_to_pose() Failed. Solution for Pose not Found." + '\033[0m')
        return flag_plan

    # Destructor
    def __del__(self):
        moveit_commander.roscpp_shutdown() #shut down the moveit commander
        rospy.loginfo(
            '\033[94m' + "Object of class Ur5Moveit Deleted." + '\033[0m')
def callback_depth(data_depth):
    """ROS subscriber callback for the arm's depth camera.

    Converts the incoming depth Image message into a 32FC1 matrix (one
    single-precision float depth value per pixel) and stores it in the
    global `depth_image` for later use by getContours().
    """
    global depth_image
    converter = CvBridge()
    try:
        depth_image = converter.imgmsg_to_cv2(data_depth, "32FC1")
    except CvBridgeError as err:
        print(err)
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def callback_Aruco(data):
    """ROS image callback that detects 7x7 ArUco markers in the camera frame.

    Stores the ID of the last marker found (as a string) in the global
    `ArucoId`; leaves it unchanged when no marker is visible.
    """
    global ArucoId
    try:
        frame = CvBridge().imgmsg_to_cv2(data, "bgr8")
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        # The dictionary must match the one the markers were generated from.
        marker_dict = cv2.aruco.Dictionary_get(cv2.aruco.DICT_7X7_1000)
        detect_params = cv2.aruco.DetectorParameters_create()
        corners, ids, _rejected = cv2.aruco.detectMarkers(rgb, marker_dict, parameters=detect_params)
        if len(corners) > 0:
            # Flatten and walk the detected IDs; the last one wins.
            for marker_id in ids.flatten():
                ArucoId = str(marker_id)
    except CvBridgeError as e:
        print(e)
#$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
def getContours(img):
    """Find tomato contours in the binary mask `img`, annotate the global
    `imgContour`, and broadcast a TF frame ("obj0", "obj1", ...) relative to
    camera_link2 for each tomato close enough (depth <= 0.75 m) to grasp.

    Returns the (x, y) pixel centre of the last bounding box processed
    (both 0 if no contour qualified).
    """
    global imgContour
    global midX
    global midY
    global depth_image
    global flagDetect
    depth = 0
    # Pinhole intrinsics of the 640x480 depth camera used to back-project
    # pixel coordinates to metric camera coordinates.
    cx = 320.5  # optical centre, x
    cy = 240.5  # optical centre, y
    fx = 554.387  # focal length, x
    fy = 554.387  # focal length, y
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    x, y, w, h = 0, 0, 0, 0  # bounding-rectangle of the current contour
    midX = 0
    midY = 0
    counter = 0  # gives each broadcast tomato a unique child-frame name (obj0, obj1, ...)
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 600:  # noise filter: only contours larger than 600 px^2 count as tomatoes
            flagDetect = True
            peri = cv2.arcLength(cnt, True)
            approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)  # 0.02 is a tunable approximation factor
            x, y, w, h = cv2.boundingRect(approx)
            # Outline the tomato and mark its centre in blue (BGR (255, 0, 0)).
            cv2.circle(imgContour, (x+(w // 2), y+(h // 2)), max(h // 2, w // 2), (255, 0, 0), 2)
            cv2.circle(imgContour, (x+(w // 2), y+(h // 2)), 1, (255, 0, 0), -1)
            midX = x + w // 2
            midY = y + h // 2
            # BUGFIX: bounds must be strict (< 640 / < 480) -- the previous <=
            # allowed indexing one past the end of the (480, 640) depth array.
            if midX != 0 and midY != 0 and midX < 640 and midY < 480:
                depth = depth_image[midY][midX]
                X = depth*((midX-cx)/fx)  # back-project to camera coordinates
                Y = depth*((midY-cy)/fy)
                Z = depth
                br = tf2_ros.TransformBroadcaster()
                t = geometry_msgs.msg.TransformStamped()
                t.header.stamp = rospy.Time.now()
                t.header.frame_id = "camera_link2"  # frame of the camera mounted on the arm
                t.child_frame_id = "obj"+str(counter)
                # BUGFIX: colour was the malformed 2-tuple (255, 0, ); blue in BGR is (255, 0, 0)
                # as the original inline comment already stated.
                cv2.putText(imgContour, t.child_frame_id,
                            (midX, midY), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (255, 0, 0), 2)
                depth = depth_image[midY][midX-w//2]  # re-sample depth at the left edge of the box
                rospy.loginfo(depth)
                if depth <= 0.75:  # only broadcast tomatoes within grasping range
                    t.transform.translation.x = Z  # camera-frame axes: x forward, y left, z up
                    t.transform.translation.y = -X
                    t.transform.translation.z = -Y
                    q = tf_conversions.transformations.quaternion_from_euler(0, 0, 0)
                    t.transform.rotation.x = q[0]
                    t.transform.rotation.y = q[1]
                    t.transform.rotation.z = q[2]
                    t.transform.rotation.w = q[3]
                    br.sendTransform(t)
                    counter = counter+1  # next qualifying tomato gets the next obj name
        else:
            # NOTE(review): any small contour resets the flag even if a large one
            # was seen earlier in this frame -- confirm this is intended.
            flagDetect = False
    cv2.imshow("Result",imgContour)
    cv2.waitKey(1)  # 1 ms delay so the window refreshes
    return x + w // 2, y + h // 2
def callback(data):
    """ROS colour-image callback: masks the frame with the tomato HSV range(s)
    and delegates contour drawing / TF broadcasting to getContours().

    Also refreshes the global `imgContour` with a copy of the raw frame.
    """
    global imgContour
    try:
        frame = CvBridge().imgmsg_to_cv2(data, "bgr8")
        imgContour = frame.copy()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        x, y = 0, 0
        # Each entry is [h_min, s_min, v_min, h_max, s_max, v_max].
        for bounds in tomatoColor:
            lower = np.array(bounds[0:3])
            upper = np.array(bounds[3:6])
            mask = cv2.inRange(hsv, lower, upper)
            x, y = getContours(mask)
    except CvBridgeError as e:
        print(e)
def moveStraight():
    """Waypoint state machine that drives the agribot along its greenhouse route.

    Exactly one of the globals flag3..flag16 is True at a time; each flag is one
    leg of the route (straight runs along the rows, turns at the ends). Each call
    either hands off to the next leg (when the odometry `pose` passes that leg's
    waypoint) or sets the global linear_vel / angular_vel, which main() publishes
    on /cmd_vel. On rows with tomatoes visible (flagDetect) the bot slows to 0.1
    and wall-follows via Pcontroller().

    NOTE(review): pose is [x, y, yaw] from odom_callback; all waypoint constants
    below are odometry coordinates tuned to the simulated world -- confirm
    against the map before changing them.
    """
    global linear_vel
    global angular_vel
    global flag3
    global flag4
    global flag5
    global flag6
    global flag7
    global flag8
    global flag9
    global flag10
    global flag11
    global flag12
    global flag13
    global flag14
    global flag15
    global flag16
    global pose
    global flagDetect
    # Leg 1: drive forward until y reaches -0.56.
    if flag3 is True:
        if pose[1] >= -0.56:
            flag3 = False
            flag4 = True
        else:
            linear_vel = 1.0
            angular_vel = 0.0
    # Leg 2: first tomato row -- creep (0.1) while a tomato is in view, else 0.5,
    # wall-following with the P controller, until y reaches 7.22.
    elif flag4 is True:
        if pose[1] >= 7.22:
            flag4 = False
            flag5 = True
        else:
            if flagDetect is True:
                linear_vel = 0.1
                angular_vel = 0.0
                Pcontroller()
            else:
                linear_vel = 0.5
                angular_vel = 0.0
                Pcontroller()
    # Leg 3: run to the end of the row, then turn in place until yaw <= 0.
    elif flag5 is True:
        if pose[2] <= 0:
            flag5 = False
            flag6 = True
        else:
            if pose[1] <= 8.27:
                linear_vel = 1.0
                angular_vel = 0.0
            elif pose[2] > 0:
                linear_vel = 0.0
                angular_vel = 1.7
    # Leg 4: cross over (x < -0.647) and turn until yaw is in (-1.6, -1.5).
    elif flag6 is True:
        if pose[0] < -0.647 and pose[2] > -1.6 and pose[2] < -1.5:
            flag6 = False
            flag7 = True
        else:
            if pose[0] >= -0.647:
                linear_vel = 1.0
                angular_vel = 0.0
            elif pose[2] < -1.6 or pose[2] > -1.5:
                linear_vel = 0.0
                angular_vel = 1.7
    # Leg 5: drive back down until y < 7.7.
    elif flag7 is True:
        if pose[1] < 7.7:
            flag7 = False
            flag8 = True
        else:
            linear_vel = 1.0
            angular_vel = 0.0
    # Leg 6: second tomato row (southbound) until y < 0.
    elif flag8 is True:
        if pose[1] < 0.0:
            flag8 = False
            flag9 = True
        else:
            if flagDetect is True:
                linear_vel = 0.1
                angular_vel = 0.0
                Pcontroller()
            else:
                linear_vel = 0.5
                angular_vel = 0.0
                Pcontroller()
    # Leg 7: exit the row (y < -1.3) then turn until yaw > 0.
    elif flag9 is True:
        if pose[1] < -1.3 and pose[2] > 0:
            flag9 = False
            flag10 = True
        else:
            if pose[1] > -1.3 and pose[2] < -1.0:
                linear_vel = 1.0
                angular_vel = 0.0
            elif pose[2] < 0:
                linear_vel = 0.0
                angular_vel = 1.7
    # Leg 8: cross over until x > 2.3.
    elif flag10 is True:
        if pose[0] > 2.3:
            flag10 = False
            flag11 = True
        else:
            if pose[0] <= 2.3:
                linear_vel = 1.0
                angular_vel = 0.0
    # Leg 9: turn in place until yaw > 1.56 (facing up the next row).
    elif flag11 is True:
        if pose[2] > 1.56:
            flag11 = False
            flag12 = True
        else:
            linear_vel = 0.0
            angular_vel = 1.7
    # Leg 10: third tomato row (northbound) until y reaches 7.22.
    elif flag12 is True:
        if pose[1] >= 7.22:
            flag12 = False
            flag13 = True
        else:
            if pose[1] < -0.56:
                linear_vel = 1.0
                angular_vel = 0.0
            elif flagDetect is True:
                linear_vel = 0.1
                angular_vel = 0.0
                Pcontroller()
            else:
                linear_vel = 0.5
                angular_vel = 0.0
                Pcontroller()
    # Leg 11: run to the end of the row, then turn until yaw < 0.
    elif flag13 is True:
        if pose[2] < 0:
            flag13 = False
            flag14 = True
        else:
            if pose[1] <= 8.27:
                linear_vel = 1.0
                angular_vel = 0.0
            elif pose[2] > 0:
                linear_vel = 0.0
                angular_vel = 1.7
    # Leg 12: cross over and turn until yaw is in (-1.6, -1.4).
    elif flag14 is True:
        if pose[2] > -1.6 and pose[2] < -1.4:
            flag14 = False
            flag15 = True
        else:
            if pose[0] >= 0.97:
                linear_vel = 1.0
                angular_vel = 0.0
            elif pose[2] > -1.4 or pose[2] < -1.6:
                linear_vel = 0.0
                angular_vel = 1.7
    # Leg 13: drive back down until y < 7.7.
    elif flag15 is True:
        if pose[1] < 7.7:
            flag15 = False
            flag16 = True
        else:
            linear_vel = 1.0
            angular_vel = 0.0
    # Leg 14 (final): last tomato row until y < 0, then fall through to stop.
    elif flag16 is True:
        if pose[1] < 0.0:
            flag16 = False
        else:
            if flagDetect is True:
                linear_vel = 0.1
                angular_vel = 0.0
                Pcontroller()
            else:
                linear_vel = 0.5
                angular_vel = 0.0
                Pcontroller()
    # Route complete: all flags False -> stop the bot.
    else:
        stopMotion()
def stopMotion():
    """Halt the bot by zeroing both global velocity commands."""
    global linear_vel, angular_vel
    linear_vel, angular_vel = 0.0, 0.0
# def laser_callback(msg): #callback function for laser
# global regions
# regions = {
# 'bright': min(min(msg.ranges[0:20]),3.2) ,
# 'fright': min(min(msg.ranges[40:60]),3.2) ,
# 'front': min(min(msg.ranges[80:100]),3.2) ,
# 'fleft': min(min(msg.ranges[120:140]),3.2) ,
# 'bleft': min(min(msg.ranges[160:180]),3.2) ,
# }
# rospy.loginfo("#########################################")
# rospy.loginfo(regions['bright'])
# rospy.loginfo(regions['fright'])
# rospy.loginfo(regions['front'])
# rospy.loginfo(regions['fleft'])
# rospy.loginfo(regions['bleft'])
# rospy.loginfo("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
def odom_callback(data):
    """Odometry callback: stores the bot's planar pose as the global
    `pose` = [x, y, yaw], with yaw extracted from the orientation quaternion."""
    global pose
    position = data.pose.pose.position
    q = data.pose.pose.orientation
    yaw = euler_from_quaternion([q.x, q.y, q.z, q.w])[2]
    pose = [position.x, position.y, yaw]
def laser_callback(msg):
    """Laser-scan callback: stores the minimum range (capped at 3.2 m) of five
    angular windows of the scan in the global `regions` dict."""
    global regions
    cap = 3.2  # ignore anything farther than 3.2 m
    windows = {
        'bright': (0, 40),
        'fright': (80, 120),
        'front': (160, 200),
        'fleft': (240, 280),
        'bleft': (680, 720),
    }
    regions = {name: min(min(msg.ranges[lo:hi]), cap)
               for name, (lo, hi) in windows.items()}
# def Pcontroller(stage): #function for the proportional controller --> this has been used so that the bot is able to correct its path if it gets deflected form its path
# global angular_vel
# global regions
# kp = -1 #correction factor
# if stage == 1: #correction for stage 1
# angular_vel = kp * (regions['bleft']-0.95) #it reads bleft laser values and 0.82 is the expected value and the difference gives the deflection and on the bases of that it returns angular velocity
# elif stage == 3: #correction for stage 3
# angular_vel = kp * (regions['bleft']-2.5)
# elif stage == 5: #correction for stage 5
# angular_vel = kp* (regions['bleft']-2.3)
def Pcontroller():
    """Proportional wall-following correction.

    Steers so the 'bleft' laser reading stays at the 0.65 m setpoint: the
    signed distance error, scaled by the gain, becomes the global angular_vel.
    """
    global angular_vel
    global regions
    gain = 10
    setpoint = 0.65
    error = regions['bleft'] - setpoint
    angular_vel = gain * error
def main(args):
    """Top-level control loop for the tomato-picking agribot.

    Sets up the node, TF listener, camera/laser/odom subscribers, the /cmd_vel
    publisher and the Ur5Moveit wrapper, then alternates between two stages:

      * flag1: move the arm to the Plant_Perception pose so the cameras see the plant.
      * flag2: wait for a tomato TF ("obj0"); while none is visible keep driving
        the route (moveStraight). Once a tomato and the row's ArUco marker are
        both visible, centre the bot on the marker, then pick the tomato with a
        two-step Cartesian approach and drop it in the bucket.

    Repeats until ROS shuts down.
    """
    rospy.init_node('object_detection_manipulation', anonymous=True) #initializing the node
    tfBuffer = tf2_ros.Buffer() #TF buffer
    listener = tf2_ros.TransformListener(tfBuffer) #TF listener feeding the buffer
    ur5 = Ur5Moveit() #MoveIt wrapper for the arm + gripper
    depth_sub = rospy.Subscriber("/camera/depth/image_raw2", Image, callback_depth) #depth image subscriber
    image_sub = rospy.Subscriber("/camera/color/image_raw2", Image, callback) #color image subscriber
    image_sub_aruco = rospy.Subscriber("/ebot/camera1/image_raw", Image, callback_Aruco) #ArUco camera subscriber
    rospy.Subscriber('/ebot/laser/scan',LaserScan, laser_callback) #laser subscriber
    rospy.Subscriber('/odom', Odometry, odom_callback) #odometry subscriber
    pub = rospy.Publisher('/cmd_vel',Twist,queue_size=10) #velocity publisher
    ur5_pose_1 = geometry_msgs.msg.Pose() #reused target pose for the arm
    velocity_msg = Twist()
    velocity_msg.linear.x = 0
    velocity_msg.linear.y = 0
    pub.publish(velocity_msg)
    global linear_vel
    global angular_vel
    global flag1
    global flag2
    global ArucoId
    global flagDetect
    global flagExecution
    while not rospy.is_shutdown():
        #### FOR TAKING THE ARM TO THE PLANT PERCEPTION POSE ####
        if flag1 is True:
            flag1 = False
            ur5.arm_robot(0)
            rospy.sleep(0.1)
            flag2 = True
        ##########################################################
        elif flag2 is True:
            try:
                trans1 = tfBuffer.lookup_transform('base_link', 'obj0', rospy.Time(0)) #TF of obj0 ----> tomato in the scene
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
                # No tomato visible yet: keep following the route and retry next iteration.
                moveStraight()
                velocity_msg.linear.x = linear_vel
                velocity_msg.angular.z = angular_vel
                pub.publish(velocity_msg)
                ArucoId = '-1'
                continue
            try:
                transAruco = tfBuffer.lookup_transform('sjcam_link', 'aruco'+ArucoId, rospy.Time(0)) #TF of the row's ArUco marker
            except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException):
                # Marker TF not available yet: creep forward until it is.
                velocity_msg.linear.x = 0.1
                velocity_msg.angular.z = 0.0
                pub.publish(velocity_msg)
                continue
            # Centre the bot on the marker (lateral offset within +/- 1 cm),
            # nudging backward/forward at 0.1 m/s.
            # NOTE(review): the lookup inside this loop is not wrapped in try --
            # a transient TF failure here would raise out of main; confirm acceptable.
            while transAruco.transform.translation.y > 0.01 or transAruco.transform.translation.y < -0.01:
                if transAruco.transform.translation.y > 0.01:
                    velocity_msg.linear.x = -0.1
                    velocity_msg.angular.z = 0.0
                    pub.publish(velocity_msg)
                elif transAruco.transform.translation.y < -0.01:
                    velocity_msg.linear.x = 0.1
                    velocity_msg.angular.z = 0.0
                    pub.publish(velocity_msg)
                transAruco = tfBuffer.lookup_transform('sjcam_link', 'aruco'+ArucoId, rospy.Time(0))
            stopMotion() #zero the velocity globals, then publish the stop
            velocity_msg.linear.x = linear_vel
            velocity_msg.angular.z = angular_vel
            pub.publish(velocity_msg)
            trans1 = tfBuffer.lookup_transform('base_link', 'obj0', rospy.Time(0)) #re-read the tomato TF after stopping
            #************************ VARIABLE DEFINITIONS ************************#
            tomato2_Position_X_Transform = trans1.transform.translation.x #x of the tomato relative to base_link
            tomato2_Position_Y_Transform = trans1.transform.translation.y #y of the tomato relative to base_link
            tomato2_Position_Z_Transform = trans1.transform.translation.z #z of the tomato relative to base_link
            base_link_Position_X = 0.16 #x position of base_link ----> our reference point
            base_link_Position_Y = 0 #y position of base_link
            base_link_Position_Z = 0.53 #z position of base_link
            ####### OFFSETS BETWEEN THE DEPTH CAMERA AND THE GRIPPER #######
            '''These offsets compensate for the depth camera not being at the
            gripper's location: depths are measured from the camera, so without
            them the gripper would stop short of / beside the tomato.'''
            offset_x_1 = 0.16
            offset_y_1 = 0.4
            offset_z_1 = 0.02
            offset_x_2 = 0.1
            offset_y_2 = 0.25
            ################################################################
            #### Fixed gripper orientation for the grasp (quaternion) ####
            ur5_pose_1.orientation.x = -0.20426466049594807
            ur5_pose_1.orientation.y = 0.9759094471343439
            ur5_pose_1.orientation.z = -0.07502044797426752
            ur5_pose_1.orientation.w = 0.01576806431223178
            ##############################################################
            ############### ACTUATION FOR THE TOMATO ###############
            # Step 1: approach pose near (not at) the tomato, for precision.
            ur5_pose_1.position.x = base_link_Position_X + tomato2_Position_X_Transform + offset_x_1
            ur5_pose_1.position.y = base_link_Position_Y + tomato2_Position_Y_Transform - offset_y_1
            ur5_pose_1.position.z = base_link_Position_Z + tomato2_Position_Z_Transform
            ur5.go_to_pose(ur5_pose_1)
            if flagExecution is False:
                continue #planning failed: restart the stage rather than grasp blindly
            rospy.sleep(0.1)
            # Step 2: final grasp pose at the tomato.
            ur5_pose_1.position.x = base_link_Position_X + tomato2_Position_X_Transform + offset_x_2
            ur5_pose_1.position.y = base_link_Position_Y + tomato2_Position_Y_Transform - offset_y_2
            ur5_pose_1.position.z = base_link_Position_Z + tomato2_Position_Z_Transform + offset_z_1
            ur5.go_to_pose(ur5_pose_1) #actuation is done in 2 steps to make it precise
            rospy.sleep(0.1)
            ur5.gripper_robot(1) #close the gripper on the tomato
            rospy.sleep(0.1)
            ur5.arm_robot(1) #carry it over the bucket (Drop_Pose)
            rospy.sleep(0.1)
            ur5.gripper_robot(0) #open the gripper to drop it
            rospy.sleep(0.1)
            ########################################################
            # Restart the cycle: re-perceive the plant on the next iteration.
            flag1 = True
            flag2 = False
    del ur5 #delete the ur5 object (runs the MoveIt shutdown destructor)
# Script entry point: run only when executed directly (e.g. via rosrun), not on import.
if __name__ == '__main__':
    main(sys.argv) #for calling the main method