diff --git a/README.md b/README.md
index 33dddfd..a7619fe 100644
--- a/README.md
+++ b/README.md
@@ -77,15 +77,13 @@ map and robot trajectory for the Lidar scan and odometry data in the log file
you can also try the log2png.py script to generate a
a PNG file instead.
-If you have installed OpenCV for Python, you can see a “live” animation
+If you have installed Matplotlib, you can see a “live” animation
by doing
-make cvmovie
+make movie
-There is also a Pyplot version (logdemoplt.py) in the works.
-
You can turn off odometry by setting the USE_ODOMETRY
parameter at the top of the Makefile to 0 (zero). You can turn off
the particle-filter (Monte Carlo position estimation) by commenting-out
diff --git a/examples/Makefile b/examples/Makefile
index 156bccc..30f824d 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -33,8 +33,8 @@ all: log2pgm Log2PGM.class
pltmovie:
./logdemoplt.py exp1 1 9999
-cvmovie:
- ./logdemocv.py exp1 1 9999
+movie:
+ ./logmovie.py exp1 1 9999
pytest:
./log2pgm.py $(DATASET) $(USE_ODOMETRY) $(RANDOM_SEED)
diff --git a/examples/cvslamshow.py b/examples/cvslamshow.py
deleted file mode 100644
index 161533c..0000000
--- a/examples/cvslamshow.py
+++ /dev/null
@@ -1,196 +0,0 @@
-'''
-cvslamshow.py - OpenCV classes for displaying maps and robots in SLAM projects
-
-Copyright (C) 2014 Simon D. Levy
-
-This code is free software: you can redistribute it and/or modify
-it under the terms of the GNU Lesser General Public License as
-published by the Free Software Foundation, either version 3 of the
-License, or (at your option) any later version.
-
-This code is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU Lesser General Public License
-along with this code. If not, see .
-'''
-
-# Robot display params
-ROBOT_COLOR_BGR = (0, 0, 255)
-ROBOT_HEIGHT = 16
-ROBOT_WIDTH = 10
-
-# Scan point display params
-SCANPOINT_RADIUS = 1
-SCANPOINT_COLOR_BGR = (0, 255, 0)
-
-# Display params for odometry-based velocity
-SENSOR_V_MAX_MM = 1000
-SENSOR_THETA_MAX_DEG = 20
-SENSOR_BAR_X = 150
-SENSOR_BAR_Y_OFFSET = 3
-SENSOR_BAR_WIDTH = 20
-SENSOR_BAR_MAX_HEIGHT = 200
-SENSOR_TEXT_X = 20
-SENSOR_V_Y = 30
-SENSOR_THETA_Y = 80
-SENSOR_LABEL_COLOR_BGR = (255,0,0)
-SENSOR_POSITIVE_COLOR_BGR = (0,255,0)
-SENSOR_NEGATIVE_COLOR_BGR = (0,0,255)
-
-# Trajectory display params
-TRAJECTORY_COLOR_BGR = (255, 0, 0)
-
-import cv
-
-# Arbitrary font for OpenCV
-FONT_FACE = cv.CV_FONT_HERSHEY_COMPLEX
-
-from math import sin, cos, radians
-
-class SlamShow(object):
-
- def __init__(self, map_size_pixels, map_scale_mm_per_pixel, window_name):
-
- # Store constants for update
- self.map_size_pixels = map_size_pixels
- self.map_scale_mm_per_pixel = map_scale_mm_per_pixel
- self.window_name = window_name
-
- # Create a byte array to display the map with a color overlay
- self.bgrbytes = bytearray(map_size_pixels * map_size_pixels * 3)
-
- # Create an empty OpenCV image to be filled with map bytes
- self.image = cv.CreateImageHeader((map_size_pixels,map_size_pixels), cv.IPL_DEPTH_8U, 3)
-
- # Create an OpenCV window for displaying the map
- cv.NamedWindow(window_name)
-
- # Set up font for displaying velocities
- self.font = cv.InitFont(FONT_FACE, 1, 1)
-
- # Display initial empty image
- cv.SetData(self.image, self.bgrbytes, self.map_size_pixels*3)
- cv.ShowImage(self.window_name, self.image)
-
-
- def displayMap(self, mapbytes):
-
- # Interleave the grayscale map bytes into the color bytes
- self.bgrbytes[0::3] = mapbytes
- self.bgrbytes[1::3] = mapbytes
- self.bgrbytes[2::3] = mapbytes
-
- # Put color bytes into image
- cv.SetData(self.image, self.bgrbytes, self.map_size_pixels*3)
-
-
- def displayRobot(self, (x_mm, y_mm, theta_deg), color=ROBOT_COLOR_BGR, scale=1, line_thickness=1):
-
- # Get a polyline (e.g. triangle) to represent the robot icon
- robot_points = self.robot_polyline(scale)
-
- # Rotate the polyline by the current angle
- robot_points = map(lambda pt: rotate(pt, theta_deg), robot_points)
-
- # Convert the robot position from meters to pixels
- x_pix, y_pix = self.mm2pix(x_mm), self.mm2pix(y_mm)
-
- # Move the polyline to the current robot position
- robot_points = map(lambda pt: (x_pix+pt[0], y_pix+pt[1]), robot_points)
-
- # Add an icon for the robot
- cv.PolyLine(self.image, [robot_points], True, color, line_thickness)
-
- def displayScan(self, scan, offset_mm = (0,0), color=SCANPOINT_COLOR_BGR):
-
- for point in scan:
- cv.Circle(self.image, (self.mm2pix(point[0]+offset_mm[0]), self.mm2pix(point[1]+offset_mm[1])), \
- SCANPOINT_RADIUS, color)
-
-
- def displayVelocities(self, dxy_mm, dtheta_deg):
-
- # Add velocity bars
- self.show_velocity(dxy_mm, SENSOR_V_MAX_MM, ' dXY', SENSOR_V_Y)
- self.show_velocity(dtheta_deg, SENSOR_THETA_MAX_DEG, 'dTheta', SENSOR_THETA_Y)
-
- def displayTrajectory(self, trajectory):
-
- for k in range(1, len(trajectory)):
-
- x1_mm, y1_mm = trajectory[k-1]
- x2_mm, y2_mm = trajectory[k]
-
- cv.Line(self.image,
- (self.mm2pix(x1_mm), self.mm2pix(y1_mm)), \
- (self.mm2pix(x2_mm), self.mm2pix(y2_mm)), \
- TRAJECTORY_COLOR_BGR)
-
- def refresh(self):
-
- # Display image
- cv.ShowImage(self.window_name, self.image)
-
- # Force image display, returning any key hit
- key = cvdisplay()
- return key if key > -1 else None
-
-
- def waitkey(self, action):
-
- print('Hit any key to %s ...' % action)
-
- key = -1
-
- while True:
-
- key = cvdisplay()
- if key > -1:
- break
-
- return key
-
-
- # Puts text in the image to label the velocity display
- def show_velocity(self, value, valspan, label, y):
- cv.PutText(self.image, label+':', (SENSOR_TEXT_X, y), self.font, SENSOR_LABEL_COLOR_BGR)
- bar_x1 = SENSOR_BAR_X + SENSOR_BAR_MAX_HEIGHT
- bar_y1 = y + SENSOR_BAR_Y_OFFSET
- bar_x2 = bar_x1 + int(value / valspan * SENSOR_BAR_MAX_HEIGHT)
- bar_y2 = y - SENSOR_BAR_WIDTH + SENSOR_BAR_Y_OFFSET
- bar_color = SENSOR_NEGATIVE_COLOR_BGR if value < 0 else SENSOR_POSITIVE_COLOR_BGR
- cv.Rectangle(self.image, (bar_x1, bar_y1), (bar_x2, bar_y2), bar_color, cv.CV_FILLED)
-
-
- # Builds an array of points for a polyline representing the robot, pointing
- # rightward and centered at (0,0).
- # Currently builds an isoceles triangle pointing rightward
- def robot_polyline(self, scale):
- xlft = -ROBOT_HEIGHT / 2 * scale
- xrgt = ROBOT_HEIGHT / 2 * scale
- ybot = ROBOT_WIDTH / 2 * scale
- ytop = -ROBOT_HEIGHT / 2 * scale
- return [(xlft,ybot), (xrgt,0), (xlft,ytop)]
-
- # Converts millimeters to pixels
- def mm2pix(self, mm):
- return int(mm / float(self.map_scale_mm_per_pixel))
-
-# Helpers -------------------------------------------------------------
-
-# Forces OpenCV image display, returning id of key it or -1 if none
-def cvdisplay():
- return cv.WaitKey(1)
-
-# Rotates a point by a specified number of degrees
-def rotate(pt, deg):
- rad = radians(deg)
- c = cos(rad)
- s = sin(rad)
- x,y = pt
- return int(x*c - y*s), int(x*s + y*c)
-
-
diff --git a/examples/logdemocv.py b/examples/logdemocv.py
deleted file mode 100755
index c0772db..0000000
--- a/examples/logdemocv.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-
-'''
-logdemo.py : BreezySLAM Python demo. Reads logfile with odometry and scan data
- from Paris Mines Tech and displays showing robot pose and map in
- real time.
-
-For details see
-
- @inproceedings{coreslam-2010,
- author = {Bruno Steux and Oussama El Hamzaoui},
- title = {CoreSLAM: a SLAM Algorithm in less than 200 lines of C code},
- booktitle = {11th International Conference on Control, Automation,
- Robotics and Vision, ICARCV 2010, Singapore, 7-10
- December 2010, Proceedings},
- pages = {1975-1979},
- publisher = {IEEE},
- year = {2010}
- }
-
-Copyright (C) 2016 Simon D. Levy and Matt Lubas
-
-This code is free software: you can redistribute it and/or modify
-it under the terms of the GNU Lesser General Public License as
-published by the Free Software Foundation, either version 3 of the
-License, or (at your option) any later version.
-
-This code is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-GNU General Public License for more details.
-
-You should have received a copy of the GNU Lesser General Public License
-along with this code. If not, see .
-'''
-
-# Map size, scale
-MAP_SIZE_PIXELS = 800
-MAP_SIZE_METERS = 32
-
-from breezyslam.algorithms import Deterministic_SLAM, RMHC_SLAM
-from mines import MinesLaser, Rover, load_data
-from cvslamshow import SlamShow
-
-from sys import argv, exit
-from time import sleep
-
-def main():
-
- # Bozo filter for input args
- if len(argv) < 3:
- print('Usage: %s ' % argv[0])
- print('Example: %s exp2 1 9999' % argv[0])
- exit(1)
-
- # Grab input args
- dataset = argv[1]
- use_odometry = True if int(argv[2]) else False
- seed = int(argv[3]) if len(argv) > 3 else 0
-
- # Load the data from the file
- timestamps, lidars, odometries = load_data('.', dataset)
-
- # Build a robot model if we want odometry
- robot = Rover() if use_odometry else None
-
- # Create a CoreSLAM object with laser params and optional robot object
- slam = RMHC_SLAM(MinesLaser(), MAP_SIZE_PIXELS, MAP_SIZE_METERS, random_seed=seed) \
- if seed \
- else Deterministic_SLAM(MinesLaser(), MAP_SIZE_PIXELS, MAP_SIZE_METERS)
-
- # Report what we're doing
- nscans = len(lidars)
-
- # Create a byte array to receive the computed maps
- mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
-
- # Set up a SLAM display
- display = SlamShow(MAP_SIZE_PIXELS, MAP_SIZE_METERS*1000/MAP_SIZE_PIXELS, 'SLAM')
-
- # Store previous timestamp to create delay for realistic movie
- prevtime = 0
-
- # Loop over scans
- for scanno in range(nscans):
-
- if use_odometry:
-
- # Convert odometry to velocities
- velocities = robot.computeVelocities(odometries[scanno])
-
- # Update SLAM with lidar and velocities
- slam.update(lidars[scanno], velocities)
-
- else:
-
- # Update SLAM with lidar alone
- slam.update(lidars[scanno])
-
- # Get new position
- x_mm, y_mm, theta_degrees = slam.getpos()
-
-
- #print(scanno, x_mm, y_mm, theta_degrees)
-
- # Get current map
- slam.getmap(mapbytes)
-
- # Display map and robot pose
- display.displayMap(mapbytes)
- display.displayRobot((x_mm, y_mm, theta_degrees))
-
- # Exit gracefully if user closes display
- key = display.refresh()
- if key != None and (key&0x1A):
- exit(0)
-
- # Add delay for real-time plot
- currtime = timestamps[scanno] / 1.e6 # Convert usec to sec
- if prevtime > 0:
- sleep(currtime-prevtime)
- prevtime = currtime
-
-
-# Helpers ---------------------------------------------------------
-
-def mm2pix(mm):
-
- return int(mm / (MAP_SIZE_METERS * 1000. / MAP_SIZE_PIXELS))
-
-
-main()
diff --git a/examples/logdemoplt.py b/examples/logmovie.py
similarity index 100%
rename from examples/logdemoplt.py
rename to examples/logmovie.py