diff --git a/jsk_pcl_ros/scripts/color_histogram_visualizer.py b/jsk_pcl_ros/scripts/color_histogram_visualizer.py
index 3449b46cd1..6882492f01 100755
--- a/jsk_pcl_ros/scripts/color_histogram_visualizer.py
+++ b/jsk_pcl_ros/scripts/color_histogram_visualizer.py
@@ -111,7 +111,7 @@ def plot_hist_hue(self, hist):
             plt.subplot(gs[1], facecolor='silver')
         else: # matplotlib version < 2.0.0
             plt.subplot(gs[1], axisbg='silver')
-        bars = plt.bar(range(2), hist[-2:], label=["white", "black"],
+        bars = plt.bar(list(range(2)), hist[-2:], label=["white", "black"],
                        width=1.0, linewidth=2.0)
         bars[0].set_facecolor((1.0, 1.0, 1.0, 1.0))
         bars[1].set_facecolor((0.0, 0.0, 0.0, 1.0))
diff --git a/jsk_pcl_ros/scripts/draw_3d_circle.py b/jsk_pcl_ros/scripts/draw_3d_circle.py
index f86559892f..b36860ad12 100755
--- a/jsk_pcl_ros/scripts/draw_3d_circle.py
+++ b/jsk_pcl_ros/scripts/draw_3d_circle.py
@@ -43,7 +43,7 @@ def publish(self):
         point_array = PointArrayStamped()
         point_array.header.frame_id = self.frame_id
         point_array.header.stamp = now
-        for i in range(self.RESOLUTION + 1) + [0]:
+        for i in list(range(self.RESOLUTION + 1)) + [0]:
             theta = 2 * math.pi / self.RESOLUTION * i
             x = self.radius * math.cos(theta)
             y = self.radius * math.sin(theta)
diff --git a/jsk_pcl_ros/scripts/extract_top_polygon_likelihood.py b/jsk_pcl_ros/scripts/extract_top_polygon_likelihood.py
index e4761a2125..857daadd39 100755
--- a/jsk_pcl_ros/scripts/extract_top_polygon_likelihood.py
+++ b/jsk_pcl_ros/scripts/extract_top_polygon_likelihood.py
@@ -29,7 +29,7 @@ def unsubscribe(self):
     def callback(self, msg, msg_coef):
         if len(msg.polygons) > 0:
             #self._pub.publish(msg.histograms[0])
-            max_index = max(xrange(len(msg.polygons)), key=lambda i: msg.likelihood[i])
+            max_index = max(list(range(len(msg.polygons))), key=lambda i: msg.likelihood[i])
             res = PolygonArray()
             res.header = msg.header
             res.polygons = [msg.polygons[max_index]]
diff --git a/jsk_perception/node_scripts/draw_classification_result.py b/jsk_perception/node_scripts/draw_classification_result.py
index 4175fe81aa..05ed16ec72 100755
--- a/jsk_perception/node_scripts/draw_classification_result.py
+++ b/jsk_perception/node_scripts/draw_classification_result.py
@@ -39,7 +39,7 @@ def _draw(self, cls_msg, imgmsg):
         rgb = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='rgb8')

         n_results = len(cls_msg.labels)
-        for i in xrange(n_results):
+        for i in range(n_results):
             label = cls_msg.labels[i]
             color = self.cmap[label % len(self.cmap)] * 255
             legend_size = int(rgb.shape[0] * 0.1)
diff --git a/jsk_perception/node_scripts/rect_array_to_image_marker.py b/jsk_perception/node_scripts/rect_array_to_image_marker.py
index e4541b50b5..048edd99e4 100755
--- a/jsk_perception/node_scripts/rect_array_to_image_marker.py
+++ b/jsk_perception/node_scripts/rect_array_to_image_marker.py
@@ -24,7 +24,7 @@ def convert(self, msg):
         marker.type = ImageMarker2.LINE_LIST
         n_colors = min(len(msg.rects), 256)
         cmap = jsk_recognition_utils.color.labelcolormap(n_colors)
-        for rect, rect_i in zip(msg.rects, range(n_colors)):
+        for rect, rect_i in zip(msg.rects, list(range(n_colors))):
             points = [(rect.x, rect.y),
                       (rect.x, rect.y + rect.height),
                       (rect.x + rect.width, rect.y + rect.height),
diff --git a/jsk_perception/node_scripts/ssd_object_detector.py b/jsk_perception/node_scripts/ssd_object_detector.py
index 0ceec4bd2b..e95d189f67 100755
--- a/jsk_perception/node_scripts/ssd_object_detector.py
+++ b/jsk_perception/node_scripts/ssd_object_detector.py
@@ -174,7 +174,7 @@ def image_cb(self, msg):
             xmin = max(0, int(np.floor(bbox[1])))
             ymax = min(H, int(np.ceil(bbox[2])))
             xmax = min(W, int(np.ceil(bbox[3])))
-            indices = [range(W*y+xmin, W*y+xmax) for y in range(ymin, ymax)]
+            indices = [list(range(W*y+xmin, W*y+xmax)) for y in range(ymin, ymax)]
             indices = np.array(indices, dtype=np.int32).flatten()
             indices_msg = PointIndices(header=msg.header, indices=indices)
             cluster_indices_msg.cluster_indices.append(indices_msg)
diff --git a/jsk_perception/scripts/create_db_for_feature_based_object_recognition.py b/jsk_perception/scripts/create_db_for_feature_based_object_recognition.py
index 2112037d9a..0fad240f1d 100755
--- a/jsk_perception/scripts/create_db_for_feature_based_object_recognition.py
+++ b/jsk_perception/scripts/create_db_for_feature_based_object_recognition.py
@@ -111,7 +111,7 @@ def main():
     knn.fit(X, y)
     y_pred = knn.predict(X)
     # validation: must be all 1.0
-    print(classification_report(y, y_pred, labels=range(len(target_names)),
+    print(classification_report(y, y_pred, labels=list(range(len(target_names))),
                                 target_names=target_names))


diff --git a/jsk_recognition_utils/node_scripts/rect_array_to_cluster_point_indices.py b/jsk_recognition_utils/node_scripts/rect_array_to_cluster_point_indices.py
index 435fbe6927..170084b0fa 100755
--- a/jsk_recognition_utils/node_scripts/rect_array_to_cluster_point_indices.py
+++ b/jsk_recognition_utils/node_scripts/rect_array_to_cluster_point_indices.py
@@ -57,7 +57,7 @@ def _convert(self, rects_msg, img_height=None, img_width=None):
             xmin = max(0, int(np.floor(rect.x)))
             ymax = min(H, int(np.ceil(rect.y + rect.height)))
             xmax = min(W, int(np.ceil(rect.x + rect.width)))
-            indices = [range(W*y+xmin, W*y+xmax) for y in range(ymin, ymax)]
+            indices = [list(range(W*y+xmin, W*y+xmax)) for y in range(ymin, ymax)]
             indices_msg.indices = np.array(indices, dtype=np.int32).flatten()
             cpi_msg.cluster_indices.append(indices_msg)
         self.pub.publish(cpi_msg)
diff --git a/jsk_recognition_utils/python/jsk_recognition_utils/mask.py b/jsk_recognition_utils/python/jsk_recognition_utils/mask.py
index 86f11d585e..73d5f71c03 100644
--- a/jsk_recognition_utils/python/jsk_recognition_utils/mask.py
+++ b/jsk_recognition_utils/python/jsk_recognition_utils/mask.py
@@ -13,7 +13,7 @@ def bounding_rect_of_mask(img, mask):

 def descent_closing(mask, init_selem, n_times):
     S = init_selem.shape
-    for i in xrange(n_times):
+    for i in range(n_times):
         selem = np.ones((S[0] * (n_times - i), S[1] * (n_times - i)))
         mask = binary_closing(mask, selem=selem)
     return mask
diff --git a/posedetectiondb/src/GatherDetectionResults.py b/posedetectiondb/src/GatherDetectionResults.py
index cafd02f311..8ee62b1994 100755
--- a/posedetectiondb/src/GatherDetectionResults.py
+++ b/posedetectiondb/src/GatherDetectionResults.py
@@ -78,7 +78,7 @@ def UniformlySampleSpace(self,bandwidth,bandthresh,delta=0.02):
         kdtree = pyANN.KDTree(self.measurements/bandwidth)
         sampledists = zeros(samplepoints.shape[0])
         goodpoints = []
-        for i in xrange(samplepoints.shape[0]):
+        for i in range(samplepoints.shape[0]):
             neighs,dists,kball = kdtree.kFRSearchArray(samplepoints[i:(i+1),:],5.0**2,32,0.0001)
             sampledists[i] = sum(exp(-dists[neighs>=0]))
         uniformpoints = samplepoints[sampledists>bandthresh,:]*bandwidth
@@ -90,7 +90,7 @@ def Prune(self,rawposes, nsize, thresh2, neighsize,giveupiters=100):
         """rawposes is Nx7"""
         iter = 1
         poses = array(rawposes)
-        indices = range(poses.shape[0])
+        indices = list(range(poses.shape[0]))
         N = poses.shape[0]
         nochange=0
         while N > nsize:
diff --git a/posedetectiondb/src/ObjectProjection.py b/posedetectiondb/src/ObjectProjection.py
index 6a64ff3b0e..b683d0193c 100644
--- a/posedetectiondb/src/ObjectProjection.py
+++ b/posedetectiondb/src/ObjectProjection.py
@@ -69,7 +69,7 @@ def Compute3DPositionImage(self,Tcamera, KK, imagewidth,imageheight,ROI=None,buf
         newKK = array(KK)
         newKK[0,2] -= offset[0]
         newKK[1,2] -= offset[1]
-        inds = array(range(width*height))
+        inds = array(list(range(width*height)))
         imagepoints = array((mod(inds,width),floor(inds/width)))
         camerapoints = transpose(dot(linalg.inv(newKK), r_[imagepoints,ones((1,imagepoints.shape[1]))]))
         hitindices,hitpositions = self.Get3DPointsFromImageRays(camerapoints, Tcamera)
diff --git a/sound_classification/scripts/create_dataset.py b/sound_classification/scripts/create_dataset.py
index af74c870c6..e85b194193 100755
--- a/sound_classification/scripts/create_dataset.py
+++ b/sound_classification/scripts/create_dataset.py
@@ -113,7 +113,7 @@ def split():
         # copy train and test data
         # resize and augment data (multiple args.augment times)
         image_num_per_class = min(args.number, file_num)
-        selected_images = random.sample(range(file_num), image_num_per_class)
+        selected_images = random.sample(list(range(file_num)), image_num_per_class)
         for i, file_name in enumerate(np.array(file_names)[selected_images]):
             if file_name.endswith('.png') is not True:
                 continue
diff --git a/sound_classification/scripts/draw_classification_result.py b/sound_classification/scripts/draw_classification_result.py
index c2cd8b8a46..923446692a 100755
--- a/sound_classification/scripts/draw_classification_result.py
+++ b/sound_classification/scripts/draw_classification_result.py
@@ -67,7 +67,7 @@ def _draw(self, cls_msg, imgmsg):
         rgb = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='rgb8')

         n_results = len(cls_msg.labels)
-        for i in xrange(n_results):
+        for i in range(n_results):
             label = cls_msg.labels[i]
             color = self.cmap[label % len(self.cmap)] * 255
             legend_size = int(rgb.shape[0] * 0.1)
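
All of the hunks above follow one Python 2 to Python 3 pattern: xrange no longer exists in Python 3, and range() now returns a lazy range object instead of a list, so call sites that concatenate the result with a list or otherwise need real list semantics are wrapped in list(...), while plain loop iteration keeps a bare range(). A minimal standalone sketch of that behavior difference (illustration only, not code from this repository; the variable names are made up):

    # Python 2's xrange() is gone in Python 3:
    #   xrange(3)                      -> NameError: name 'xrange' is not defined

    resolution = 8

    # range() is now a lazy sequence, not a list, so list concatenation fails:
    #   range(resolution + 1) + [0]    -> TypeError in Python 3
    indices = list(range(resolution + 1)) + [0]   # works on both Python 2 and 3

    # Iteration, len(), and indexing still work on a bare range object,
    # which is why simple loops only needed the xrange -> range rename.
    for i in range(len(indices)):
        print(indices[i])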