Stochastic Differential Equations: A Brief Look at Monte Carlo Methods (Notes)


The online course content comes from the Bilibili uploader: Love小矫情Forever

Code 1: estimating the area of the unit circle (and hence π) by uniform sampling on the square [-1, 1] x [-1, 1].

import numpy as np
#simulate many samples
counter = 0
N = 1000000
for i in range(N):
    x = 2 * np.random.rand() - 1  # x ~ U[-1, 1]
    y = 2 * np.random.rand() - 1  # y ~ U[-1, 1]
    if x ** 2 + y ** 2 < 1:
        counter += 1
proba = counter / N
print(proba)

S_Circle = proba * 4.0  # square area is 4, so circle area ≈ 4 * proba ≈ pi
print(S_Circle) 
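
A vectorized variant (my own sketch, not from the course) gives the same estimate without the Python loop:

import numpy as np
N = 1000000
x = 2 * np.random.rand(N) - 1  # N samples of x ~ U[-1, 1]
y = 2 * np.random.rand(N) - 1  # N samples of y ~ U[-1, 1]
proba = np.mean(x ** 2 + y ** 2 < 1)  # fraction of points falling inside the unit circle
print(proba * 4.0)  # square area is 4, so this approximates pi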

Code 2: estimating E(X^4) for X ~ N(0, 2^2) by Monte Carlo sampling; the exact value is 48.

import numpy as np
# x ~ N(0, 2^2)  E(X^4)=48
N = 1000000
list_MC = []
for i in range(N):
    X_Sample = 2 * np.random.randn()
    list_MC.append(X_Sample)
total = 0.0
for j in list_MC:
    total += j ** 4
estimate = total / N
print(estimate)
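
Again, a vectorized sketch (my own addition, not from the course) of the same estimator; the result should be close to the exact value 48:

import numpy as np
N = 1000000
X = 2 * np.random.randn(N)  # N samples of X ~ N(0, 2^2)
print(np.mean(X ** 4))  # Monte Carlo estimate of E(X^4); exact value is 3 * 2^4 = 48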

Code 3: pricing a European call by Monte Carlo simulation of the geometric Brownian motion SDE (Euler scheme), then visualizing sample paths and the distribution of the terminal price.

import numpy as np
import scipy.stats as scista
import matplotlib.pyplot as plt
S0 = 10.0
K = 8.0
r = 0.03
sigma = 0.15
T = 0.5 #year

#computing the estimated price by Monte Carlo methods
ST_list = []
Steps = 100
Paths = 10000
dt = T / Steps
for i in range(Paths):
    S = S0
    for j in range(Steps):
        Z = np.random.randn()
        S = S + r * S * dt + sigma * S * Z * np.sqrt(dt)  # Euler step of dS = r*S*dt + sigma*S*dW
    ST_list.append(S)
sum_ = 0.0
for element in ST_list: # element->ST
    sum_ += np.max([0.0,element - K])
EQ = sum_ / Paths
MCPrice = np.exp(-r * T) * EQ
print("Monte Carlo price of the European call option is:",MCPrice)

#visualization of the stock price process
#simulate 100 paths and save the whole price path of each
list_of_processes = []
for h in range(100):
    S = S0
    Stock_Price_Process = []
    Stock_Price_Process.append(S)
    for i in range(Steps):
        Z = np.random.randn()
        S = S + r * S * dt + sigma * S * Z * np.sqrt(dt)
        Stock_Price_Process.append(S)
    list_of_processes.append(Stock_Price_Process)
time = np.linspace(0, T, Steps + 1)  # time grid: 0, dt, 2*dt, ..., T
plt.figure(figsize = (20, 10))
for h in range(100):
    plt.plot(time,list_of_processes[h],"black")
plt.show()

#visualization of the terminal stock price distribution
#simulate 10000 paths and save only the terminal price ST of each
list_of_ST = []
for h in range(10000):
    S = S0
    for i in range(Steps):
        Z = np.random.randn()
        S = S + r * S * dt + sigma * S * Z * np.sqrt(dt)
    list_of_ST.append(S)
plt.hist(list_of_ST, bins = 1000, density = True)
plt.show()
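
For comparison (my own addition, not part of the course code), the closed-form Black-Scholes price with the same parameters gives a reference value that the Monte Carlo estimate above should be close to:

import numpy as np
import scipy.stats as scista
# uses S0, K, r, sigma, T as defined above
d1 = (np.log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
d2 = d1 - sigma * np.sqrt(T)
BS_Price = S0 * scista.norm.cdf(d1) - K * np.exp(-r * T) * scista.norm.cdf(d2)
print("Black-Scholes closed-form price:", BS_Price)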

Code 4: minimizing f(x) = (x - 5)^2 by gradient descent, with the gradient computed by PyTorch autograd.

import numpy as np
import torch
from torch.autograd import Variable

Iter_times = 1000
x_min = Variable(torch.FloatTensor(10.0 * np.ones(1)),requires_grad = True)
alpha = 0.2
for i in range(Iter_times):
    f = (x_min - 5) ** 2
    f.backward()
    grad = x_min.grad.data # Obtain the gradient at point x^(k)
    x_min.data = x_min.data - alpha * grad # one gradient-descent step, moving x_min towards the minimizer
    print(i,"times of iteration have been completed!")
    print("   -> Now x_min =",x_min)
    print("========================")

    #Zero the gradient so it does not accumulate into the next iteration
    x_min.grad.data[:] = 0.0
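
torch.autograd.Variable has long been merged into Tensor, so the same loop can be written with a plain tensor and requires_grad=True (my own rewrite, not from the course):

import torch

x_min = torch.tensor([10.0], requires_grad = True)
alpha = 0.2
for i in range(1000):
    f = (x_min - 5) ** 2
    f.backward()  # autograd fills x_min.grad with df/dx at the current point
    with torch.no_grad():
        x_min -= alpha * x_min.grad  # gradient-descent update
        x_min.grad.zero_()           # reset the gradient for the next iteration
print(x_min)  # converges to 5, the minimizer of f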

Code 5: fitting the coefficients A and B in the model A*x^2 + B to data generated from y = 3x^2 + 5, by gradient descent on the mean-squared error.

import numpy as np
import torch
from torch.autograd import Variable

x = np.array([1,2,3,4,5,6,7,8,9,10])
y = 3 * x ** 2 + 5
# Initialize the coefficients A and B
x = Variable(torch.FloatTensor(x))
y = Variable(torch.FloatTensor(y))
A = Variable(torch.FloatTensor(10.0 * np.ones(1)),requires_grad = True)
B = Variable(torch.FloatTensor(10.0 * np.ones(1)),requires_grad = True)

# Construct the loss function
# torch.mean((A * x ** 2 + B - y) ** 2)
Iter_times = 10000
alpha = 0.00035
for i in range(Iter_times):
    loss = torch.mean((A * x ** 2 + B - y) ** 2)
    loss.backward()
    grad_A = A.grad.data # Obtain the gradient at A
    grad_B = B.grad.data # Obtain the gradient at B
    A.data = A.data - alpha * grad_A # update A
    B.data = B.data - alpha * grad_B # update B
    print(i,"times of iteration have been completed!")
    print("   -> Now A =",A)
    print("   -> Now B =",B)
    print("========================")

    A.grad.data[:] = 0.0
    B.grad.data[:] = 0.0
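
The same fit can also be written with a torch.optim optimizer (my own sketch, not from the course), which takes care of both the update step and the gradient reset:

import torch

x = torch.arange(1.0, 11.0)  # 1, 2, ..., 10
y = 3 * x ** 2 + 5
A = torch.tensor([10.0], requires_grad = True)
B = torch.tensor([10.0], requires_grad = True)
optimizer = torch.optim.SGD([A, B], lr = 0.00035)
for i in range(10000):
    optimizer.zero_grad()                         # clear gradients from the previous step
    loss = torch.mean((A * x ** 2 + B - y) ** 2)  # same least-squares loss as above
    loss.backward()
    optimizer.step()                              # gradient-descent update of A and B
print(A, B)  # A approaches 3 quickly; B drifts towards 5 much more slowly at this learning rate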

Code 6: estimating the implied volatility of a European call by gradient descent, differentiating a Monte Carlo price through the simulation with autograd so that sigma matches the observed market price.

import numpy as np
import scipy.stats as scista
import torch
from torch.autograd import Variable

def BSPrice(S0, K, T, r, sigma):
    # Black-Scholes closed-form price of a European call
    d1 = (np.log(S0 / K) + (r + 0.5 * sigma ** 2) * T) / (sigma * np.sqrt(T))
    d2 = d1 - sigma * np.sqrt(T)
    Price = S0 * scista.norm.cdf(d1) - K * np.exp(-r * T) * scista.norm.cdf(d2)
    return Price

# European Call(We do NOT know sigma, and we want to estimate sigma -> implied volatility)
S0 = Variable(torch.FloatTensor(10.0 * np.ones(1)))
K = Variable(torch.FloatTensor(8.0 * np.ones(1)))
T = Variable(torch.FloatTensor(0.5 * np.ones(1)))
r = Variable(torch.FloatTensor(0.03 * np.ones(1)))
P_market = Variable(torch.FloatTensor(3.2 * np.ones(1))) # market price
sigma = Variable(torch.FloatTensor(0.15 * np.ones(1)),requires_grad = True)

def MCPrice(S0, K, T, r, sigma, Paths, Steps):
    S = S0 * Variable(torch.FloatTensor(np.ones((Paths, 1))))  # column vector: Paths copies of the initial price
    dt = T / Steps
    for i in range(Steps):
        Z = Variable(torch.randn(Paths,1))
        S = S + r * S * dt + sigma * S * Z * torch.sqrt(dt)
    payoff_vec = torch.relu(S - K)
    price_vec = torch.exp(-r * T) * payoff_vec
    Price = torch.mean(price_vec)
    return Price

Iter_times = 10000
alpha = 0.001
for i in range(Iter_times):
    MCP = MCPrice(S0, K, T, r, sigma, 3000, 100)
    loss = (MCP - P_market) ** 2
    loss.backward()
    grad = sigma.grad.data
    alpha_temp = alpha / (1 + 0.001 * i)
    sigma.data = sigma.data - alpha_temp * grad
    print(i,"times of iteration have been completed!")
    print("   -> sigma =",sigma)
    print("   -> loss =",loss)
    print("========================")

    sigma.grad.data[:] = 0.0
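
As a cross-check (my own addition, not from the course), the implied volatility can also be obtained by root-finding on the closed-form BSPrice defined above, e.g. with scipy.optimize.brentq; the sigma found by the gradient-descent loop should roughly agree with it:

from scipy.optimize import brentq

# Solve BSPrice(S0, K, T, r, sigma) = P_market for sigma, using plain floats
implied_vol = brentq(lambda s: BSPrice(10.0, 8.0, 0.5, 0.03, s) - 3.2, 1e-6, 5.0)
print("implied volatility from the closed-form price:", implied_vol)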

Code 7: the same gradient-descent loop applied to f(x) = 0.2 + 0.3x - 0.5x^2.

import numpy as np
import torch
from torch.autograd import Variable

Iter_times = 10000
x_min = Variable(torch.FloatTensor(10.0 * np.ones(1)),requires_grad = True)
alpha = 0.2
for i in range(Iter_times):
    f = 0.2 + x_min * 0.3 - x_min ** 2 * 0.5
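    # Note: the coefficient of x^2 is negative, so f is concave and has no minimum;
    # this gradient-descent iteration therefore diverges (x_min grows without bound).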
    f.backward()
    grad = x_min.grad.data # Obtain the gradient at point x^(k)
    x_min.data = x_min.data - alpha * grad # one gradient-descent step on the current value of x_min
    print(i,"times of iteration have been completed!")
    print("   -> Now x_min =",x_min)
    print("========================")

    #Zero the gradient so it does not accumulate into the next iteration
    x_min.grad.data[:] = 0.0