Python Forum
Kalman Filter for predicting randomized chaos in Jupyter Labs
#1
As a start to this forum, I've included this wonderful piece of vanilla code: the Kalman filter.
I downloaded this wonderfully elegant Kalman filter from GitHub: https://github.com/hbcbh1999/kalman-filter It's a really nice and simple implementation using the equations you can find on Wikipedia: https://en.wikipedia.org/wiki/Kalman_filter It looks really awesome when applied to the X dimension of the Arneodo chaotic attractor with added noise. Enjoy.
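
For reference, the class below implements the standard predict/update equations from the Wikipedia article:

Predict:
$$\hat{x}_{k\mid k-1} = F\,\hat{x}_{k-1\mid k-1} + B\,u_k, \qquad P_{k\mid k-1} = F\,P_{k-1\mid k-1}\,F^{\top} + Q$$

Update (using the Joseph-form covariance update, as in the code):
$$y_k = z_k - H\,\hat{x}_{k\mid k-1}, \qquad S_k = H\,P_{k\mid k-1}\,H^{\top} + R, \qquad K_k = P_{k\mid k-1}\,H^{\top}\,S_k^{-1}$$
$$\hat{x}_{k\mid k} = \hat{x}_{k\mid k-1} + K_k\,y_k, \qquad P_{k\mid k} = (I - K_k H)\,P_{k\mid k-1}\,(I - K_k H)^{\top} + K_k R K_k^{\top}$$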

Works in Python 2.7 or Python 3.0+

#!/usr/bin/env python
# coding: utf-8

# In[1]:


get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np


# In[2]:


class KalmanFilter(object):
    def __init__(self, F = None, B = None, H = None, Q = None, R = None, P = None, x0 = None):

        if(F is None or H is None):
            raise ValueError("Set proper system dynamics.")

        self.n = F.shape[1]  # state dimension
        self.m = H.shape[0]  # measurement dimension

        self.F = F                                             # state transition model
        self.H = H                                             # observation model
        self.B = 0 if B is None else B                         # control-input model
        self.Q = np.eye(self.n) if Q is None else Q            # process noise covariance
        self.R = np.eye(self.m) if R is None else R            # measurement noise covariance
        self.P = np.eye(self.n) if P is None else P            # estimate covariance
        self.x = np.zeros((self.n, 1)) if x0 is None else x0   # initial state

    def predict(self, u = 0):
        # Project the state estimate and covariance forward one time step
        self.x = np.dot(self.F, self.x) + np.dot(self.B, u)
        self.P = np.dot(np.dot(self.F, self.P), self.F.T) + self.Q
        return self.x

    def update(self, z):
        # Innovation (measurement residual) and its covariance
        y = z - np.dot(self.H, self.x)
        S = self.R + np.dot(self.H, np.dot(self.P, self.H.T))
        # Kalman gain
        K = np.dot(np.dot(self.P, self.H.T), np.linalg.inv(S))
        self.x = self.x + np.dot(K, y)
        I = np.eye(self.n)
        # Joseph-form covariance update (numerically more stable)
        self.P = np.dot(np.dot(I - np.dot(K, self.H), self.P),
                        (I - np.dot(K, self.H)).T) + np.dot(np.dot(K, self.R), K.T)
        return y
        

def arneodo(x, y, z, a=-5.5, b=3.5, c=-1):
    '''
    Given:
       x, y, z: a point of interest in three-dimensional space
       a, b, c: parameters defining the Arneodo attractor
    Returns:
       x_dot, y_dot, z_dot: values of the Arneodo system's time
           derivatives at the point x, y, z
    '''
    x_dot = y
    y_dot = z
    z_dot = -a*x-b*y-z+c*(x**3)
    return x_dot, y_dot, z_dot


dt = 0.01
num_steps = 7000

# Need one more for the initial values
xs = np.empty(num_steps + 1)
ys = np.empty(num_steps + 1)
zs = np.empty(num_steps + 1)

# Set initial values
xs[0], ys[0], zs[0] = (0.1, 0, 0.1)

# Step through "time", calculating the partial derivatives at the current point
# and using them to estimate the next point
for i in range(num_steps):
    x_dot, y_dot, z_dot = arneodo(xs[i], ys[i], zs[i])
    xs[i + 1] = xs[i] + (x_dot * dt)
    ys[i + 1] = ys[i] + (y_dot * dt)
    zs[i + 1] = zs[i] + (z_dot * dt)

def example():
    dt = 1.0/60
    F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]]).reshape(3, 3)
    H = np.array([1, 0, 0]).reshape(1, 3)
    Q = np.array([[0.05, 0.05, 0.0], [0.05, 0.05, 0.0], [0.0, 0.0, 0.0]]).reshape(3, 3)
    R = np.array([0.5]).reshape(1, 1)

    # Noisy observations of the x component of the attractor
    measurements = xs + np.random.normal(0, 2, len(xs))

    kf = KalmanFilter(F = F, H = H, Q = Q, R = R)
    predictions = []
    Error = np.zeros(len(measurements))

    for n, z in enumerate(measurements):
        predictions.append(np.dot(H, kf.predict())[0, 0])
        Error[n] = kf.update(z).item()  # innovation (pre-fit residual) at this step

    import matplotlib.pyplot as plt
    plt.plot(range(len(Error)), Error, label = 'Error')
    plt.plot(range(len(measurements)), measurements, label = 'Measurements')
    plt.plot(range(len(predictions)), np.array(predictions), label = 'Kalman Filter Prediction')
    plt.plot(range(len(xs)), xs, label = 'Signal')
    plt.legend()
    plt.show()
    
if __name__ == '__main__':
    example()
The discussions that follow will be about the matrices used in the Kalman filter, what form they may take, and how they can be derived from the process we are modelling:

dt = 1.0/60
F = np.array([[1, dt, 0], [0, 1, dt], [0, 0, 1]]).reshape(3, 3)  # State transition model
H = np.array([1, 0, 0]).reshape(1, 3)  # Observation model
Q = np.array([[0.05, 0.05, 0.0], [0.05, 0.05, 0.0], [0.0, 0.0, 0.0]]).reshape(3, 3)  # Process noise covariance
R = np.array([0.5]).reshape(1, 1)  # Measurement noise covariance
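
As a starting point for that discussion, here is one common way to parameterise Q for a position/velocity/acceleration state like the one F describes above: assume the highest-order state is driven by zero-mean white noise and let a single noise strength fill in the whole matrix. This is only an illustrative sketch, not how the example above chose its Q, and the sigma value is an assumption:

import numpy as np

dt = 1.0 / 60
sigma = 0.5  # assumed standard deviation of the white noise driving the acceleration

# Discrete white-noise model: noise enters through the highest-order state and
# integrates into the lower-order states via G = [dt**2 / 2, dt, 1].
G = np.array([[dt**2 / 2], [dt], [1.0]])
Q = sigma**2 * np.dot(G, G.T)

print(Q)

With F as the integrator chain above, every entry of Q is then tied to one tunable noise strength, which is usually easier to reason about than picking each entry of the matrix by hand.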


[Image: output_4_0.png - plot of the signal, noisy measurements, Kalman filter prediction, and error]

Original source:
http://spacetripping.just4books.co.uk/Hi...hp?f=7&t=3

