# Setup notebook for calculations and plotting
# Plotting
# Uncomment the following line for static inline plots
# %matplotlib inline
import matplotlib
# Comment out the following line (and uncomment %matplotlib inline above)
# to disable the interactive nbAgg backend
matplotlib.use('nbAgg')
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.cm as cm
# Numerical simulations
import math
import numpy as np
import scipy.stats as stats
import pandas as pd
# import collections
# # Matrix calculations
# import sympy
# from IPython.display import display
# sympy.init_printing(use_latex=True)
# Saving grids to a file
# import pickle
# Debugging
from IPython.core.debugger import set_trace
- Implement Exercise 2 using particle filters instead of histograms, and plot and discuss the results. Investigate the effect of varying numbers of particles on the result.
See notebook 08 for the results of exercise 4-2. Exercise 4-2 is an extension of exercise 3-4, which was solved with an extended Kalman filter (EKF) in notebook 06.
We noted in the text that the EKF linearization is an approximation. To see how bad this approximation is, we ask you to work out an example. Suppose we have a mobile robot operating in a planar environment. Its state is its x-y-location and its global heading direction $\theta$. Suppose we know $x$ and $y$ with high certainty, but the orientation $\theta$ is unknown. This is reflected by our initial estimate
$$ \tag{1} \vec \mu = \left\lgroup \begin{matrix} 0 & 0 & 0 \end{matrix} \right\rgroup $$
$$ \tag{2} \mathbf{\Sigma} = \left\lgroup \begin{matrix}0.01 & 0 & 0 \cr 0 & 0.01 & 0 \cr 0 & 0 & 10000 \end{matrix} \right\rgroup $$
For this exercise, we assume the robot moves flawlessly without any noise. Thus, the expected location of the robot after motion will be
$$ \tag{3} \left\lgroup \begin{matrix} x^\prime \cr y^\prime \cr \theta^\prime \end{matrix} \right\rgroup = \left\lgroup \begin{matrix} x + \cos \theta \cr y + \sin \theta \cr \theta \end{matrix} \right\rgroup $$
Our measurement shall be a noisy projection of the x-coordinate of the robot, with covariance $Q = 0.01$. Specify the measurement model. Now apply the measurement both to your intuitive posterior, and formally to the EKF estimate using the standard EKF machinery. Give the exact result of the EKF, and compare it with the result of your intuitive analysis.
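Before turning to the particle filter, it is worth writing out the measurement model requested above. Since the measurement is a noisy projection of the robot's x-coordinate with covariance $Q = 0.01$, the model is linear:
$$ \tag{4} z = \left\lgroup \begin{matrix} 1 & 0 & 0 \end{matrix} \right\rgroup \left\lgroup \begin{matrix} x \cr y \cr \theta \end{matrix} \right\rgroup + \delta, \qquad \delta \sim \mathcal{N}(0,\, 0.01) $$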
Our particle set $X$ will consist of $M = 1000$ particles.
M = 1000 # Number of particles
The class PSet uses a numpy structured array to represent a particle set. It also contains array columns for weights and an empirical cumulative distribution function that will be used for the measurement update portion of the PF algorithm.
class PSet(object):
    def __init__(self, num_particles):
        """Initializes a PSet object with all particles set to (0, 0, 0)."""
        self.num_particles = num_particles
        self.ptcs = np.zeros(num_particles,
                             dtype=[("x", np.float16),
                                    ("y", np.float16),
                                    ("theta", np.float16),
                                    ("weights", np.float32),
                                    ("cdf", np.float32)])
        self.belief = False
        self.measurement = None
        self.posterior = False

    def __getitem__(self, key):
        return self.ptcs[key]

    def __setitem__(self, key, value):
        self.ptcs[key] = value

    @property
    def shape(self):
        return self.ptcs.shape

    def set_cdf(self):
        """Populates the 'cdf' column, based on particle weights."""
        weight_sum = 0
        for row in self.ptcs:
            weight_sum = row["weights"] + weight_sum
            row["cdf"] = weight_sum

    def resample(self):
        """Returns a new posterior PSet resulting from the PF resampling step."""
        sum_weights = np.sum(self["weights"])
        rand_samples = np.sort(np.random.random_sample(size=self.num_particles)
                               * sum_weights)
        num_ptcl = self.num_particles
        posterior = PSet(num_ptcl)
        post_idx = 0
        bel_idx = 0
        while bel_idx < num_ptcl:
            # Copy the current particle once for every sorted random draw
            # that falls below its cumulative weight.
            while ((post_idx < num_ptcl and bel_idx < num_ptcl) and
                   rand_samples[post_idx] < self["cdf"][bel_idx]):
                posterior.ptcs["x"][post_idx] = self["x"][bel_idx]
                posterior.ptcs["y"][post_idx] = self["y"][bel_idx]
                posterior.ptcs["theta"][post_idx] = self["theta"][bel_idx]
                post_idx = post_idx + 1
            bel_idx = bel_idx + 1
        posterior.posterior = True
        posterior.belief = False
        return posterior

    def clear_weights(self):
        """Sets weights and CDF to zero."""
        self.ptcs["weights"] = 0
        self.ptcs["cdf"] = 0

    def covariance(self):
        """Calculates the covariance matrix of the particle set."""
        cov_array = np.array([self.ptcs["x"], self.ptcs["y"],
                              self.ptcs["theta"]])
        return np.cov(cov_array)

    def mean(self):
        """Returns the means for the particle set."""
        return (np.mean(self["x"]), np.mean(self["y"]), np.mean(self["theta"]))

    def theta_x(self):
        return np.cos(self["theta"])

    def theta_y(self):
        return np.sin(self["theta"])

    def scatter_plot(self, title, lims=None):
        fig = plt.figure()
        plt.plot(self["x"], self["y"], 'b.')
        plt.title(title)
        plt.xlabel("x")
        plt.ylabel("y")
        if lims is not None:
            plt.xlim(lims[0])
            plt.ylim(lims[1])
        plt.grid()
        plt.show()
        plt.close()
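As an aside, the explicit loop in `set_cdf` is equivalent to a vectorized cumulative sum: for large particle counts, the method body could be reduced to the single line `self.ptcs["cdf"] = np.cumsum(self.ptcs["weights"])`.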
At time $t = 0$ we know the x and y position of the robot precisely, but the heading $\theta$ is unknown.
pset0 = PSet(M)
pset0["theta"] = np.random.random_sample(M) * 2 * math.pi
The state transition portion of the generic Bayes filter algorithm is addressed by lines 3 and 4 in the particle filter algorithm. We will use equation (3) for this update, as implemented in the next_step function.
def next_step(pset):
    """Applies the state transition to create a new particle set for time t + 1."""
    num_particles = pset.num_particles
    pset_next = PSet(num_particles)
    pset_next["x"] = pset["x"] + np.cos(pset["theta"])
    pset_next["y"] = pset["y"] + np.sin(pset["theta"])
    pset_next["theta"] = pset["theta"]
    pset_next.belief = True
    return pset_next
pset1 = next_step(pset0)
pset1.mean()
pset1.covariance()
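Since $\theta$ is uniform, the particles after one step lie on the unit circle centered at the origin: both position means should be near zero, the $x$ and $y$ variances near $\mathrm{Var}(\cos\theta) = 0.5$, and the $\theta$ variance near $(2\pi)^2/12 \approx 3.3$.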
def plot_grid(pset, title, xlim=(-1.2, 1.2), ylim=(-1.2, 1.2)):
    fig2 = plt.figure()
    plt.quiver(pset["x"], pset["y"], pset.theta_x(), pset.theta_y(), scale=40)
    plt.xlabel("x position")
    plt.ylabel("y position")
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.grid()
    plt.title(title)
    plt.show()
    plt.close()
plot_grid(pset1, "Position at Time 1")
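Next comes the importance-weighting step of the PF algorithm (line 5), which weights each particle by the measurement likelihood $p(z \mid x)$. Under the measurement model in equation (4), this likelihood is a normal pdf centered on the particle's x-coordinate with variance $Q = 0.01$: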
MEASUREMENT_VARIANCE = 0.01
def weight(x_coord, meas, var):
    return stats.norm.pdf(meas, loc=x_coord, scale=math.sqrt(var))
weights = np.vectorize(weight, excluded=["meas", "var"], otypes=[np.float16])
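Incidentally, `stats.norm.pdf` already broadcasts over array arguments, so the `np.vectorize` wrapper mainly serves to cast the output to `float16`; because the normal pdf is symmetric in its argument and mean, a direct call such as `stats.norm.pdf(pset1["x"], loc=1, scale=math.sqrt(MEASUREMENT_VARIANCE))` would yield the same weights (up to precision). Note also that string entries in `excluded` only apply to keyword arguments, so with the positional calls below the scalar `meas` and `var` arguments are simply broadcast.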
Now we assign a weight to each particle based on the measurement and build the empirical CDF:
pset1["weights"] = weights(pset1["x"], 1,
MEASUREMENT_VARIANCE) # Assign weights
pset1.set_cdf() # Create empirical CDF from weights.
pset1.measurement = 1
Let's take a look at the first few lines of the particle set:
pset1[0:10]
The closer the x position (1st column) is to the measured value of 1, the higher the weight (4th column), as expected. The 5th column contains the empirical cumulative distribution function that will be used for resampling.
Finally, we'll complete lines 8 through 11 of the PF algorithm via the PSet.resample() method.
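As an aside, PSet.resample() implements multinomial resampling: sorting the uniform draws lets a single forward pass through the empirical CDF select all of the posterior particles. A common lower-variance alternative is systematic resampling; here is a minimal sketch against the same PSet interface (this systematic_resample helper is hypothetical and not used elsewhere in this notebook):
def systematic_resample(pset):
    """Sketch of low-variance (systematic) resampling: one random offset,
    then num_particles evenly spaced points through the empirical CDF."""
    num = pset.num_particles
    pset.set_cdf()
    points = ((np.random.random_sample() + np.arange(num)) / num
              * pset["cdf"][-1])
    idx = np.searchsorted(pset["cdf"], points)  # CDF bin for each point
    posterior = PSet(num)
    for field in ("x", "y", "theta"):
        posterior[field] = pset[field][idx]
    posterior.posterior = True
    return posterior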
post1_m1 = pset1.resample()
Here are the results:
post1_m1.mean()
post1_m1.covariance()
The covariance for $\theta$ is misleading: it is counterintuitive that the covariance would have increased from the pre-measurement belief. The culprit is wrap-around. For a measurement of $x = 1$, $\theta$ is centered on 0, with roughly half the values between 0 and about $\pi/4$ and half between about $7\pi/4$ and $2\pi$. We could get an accurate value for the covariance of $\theta$ by subtracting $2\pi$ from all values of $\theta$ greater than $\pi$, but I'll just calculate the covariance matrix for a measurement of -1 instead.
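For completeness, the wrap-around correction is only a couple of lines (a sketch, applied to the posterior from the $x = 1$ measurement):
theta = post1_m1["theta"].astype(np.float64)
theta_wrapped = np.where(theta > np.pi, theta - 2 * np.pi, theta)
np.var(theta_wrapped)  # variance of theta with values re-centered on 0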
pset1.clear_weights()
pset1["weights"] = weights(pset1["x"], -1,
MEASUREMENT_VARIANCE) # Assign weights
pset1.set_cdf() # Create empirical CDF from weights.
pset1.measurement = -1
post1_mneg1 = pset1.resample()
post1_mneg1.mean()
post1_mneg1.covariance()
Much better. With the measurement at $x = -1$, $\theta$ clusters around $\pi$, so there is no wrap-around problem:
plot_grid(post1_mneg1, "Time = 1, Measurement = -1")
Let's try a measurement of $x = 0$:
pset1.clear_weights()
pset1["weights"] = weights(pset1["x"], 0,
MEASUREMENT_VARIANCE) # Assign weights
pset1.set_cdf() # Create empirical CDF from weights.
pset1.measurement = 0
post1_0 = pset1.resample()
post1_0.mean()
post1_0.covariance()
plot_grid(post1_0, "Time = 1, Measurement = 0")
The measurement of $x = 0$ results in a bimodal belief, which the particle filter is able to represent. Note that the variance for $x$ is low compared to the variance for $y$. Also, the mean is not a good estimate of the position, in the sense that the robot is not at the mean state. Finally, note that the mean $\theta$ is approximately $\pi$ (pointing to the left).
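A quick numerical check of the bimodality is a histogram of the posterior particles' y-coordinates, which should show two clusters near $y = \pm 1$ (corresponding to $\theta \approx \pi/2$ and $\theta \approx 3\pi/2$):
plt.figure()
plt.hist(post1_0["y"], bins=50)
plt.xlabel("y position")
plt.title("Posterior y distribution, Measurement = 0")
plt.show()
plt.close()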
Stacy Irwin, August 25, 2018