(Maximum likelihood estimation) scipy.optimize.minimize error


I am getting the following error:

  File "C:/Users/", line 70, in <module>
    cal_PIN()
  File "C:/Users/", line 67, in cal_PIN
    cal_likelihood(selling_5_tick, buying_5_tick)
  File "C:/Users/", line 48, in cal_likelihood
    raise valueErr(result.message)
valueErr: Desired error not necessarily achieved due to precision loss.

I want to estimate the parameters of the PIN model. The log-transformed likelihood function is the one shown in the attached picture. The parameters to be estimated are (α, δ, μ, εB, εS). I wrote three nested loops to set up the initial values, and I try to estimate the parameters by maximum likelihood estimation with scipy.optimize.minimize.
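For one observation with B buys and S sells, the standard factorized PIN log-likelihood that the k1, k2, k3 and kmi terms in the code below correspond to is (dropping the constant ln B! + ln S! terms):

ln L = -εB - εS + B·ln(μ + εB) + S·ln(μ + εS) + kmax
       + ln( α·δ·exp(k1 - kmax) + α·(1-δ)·exp(k2 - kmax) + (1-α)·exp(k3 - kmax) )

with k1 = -μ - B·ln(1 + μ/εB), k2 = -μ - S·ln(1 + μ/εS), k3 = -B·ln(1 + μ/εB) - S·ln(1 + μ/εS) and kmax = max(k1, k2, k3).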

import time
import scipy
from scipy.optimize import minimize

def f(params, *args):
    # args={k1, k2, k3, kmi, buying_array[i], selling_array[i]}
    k1 = args[0]
    k2 = args[1]
    k3 = args[2]
    kmi = args[3]
    buying_array = args[4]
    selling_array = args[5]

    ini_a, ini_h, ini_eS, ini_eB = params
    return (-1) * (ini_a * ini_h * scipy.exp(k1 - kmi)
                   + ini_a * (1 - ini_h) * scipy.exp(k2 - kmi)
                   + (1 - ini_a) * scipy.exp(k3 - kmi)
                   + (buying_array * scipy.log(ini_eB + ini_h)
                      + selling_array * scipy.log(ini_eS + ini_h)
                      - (ini_eB + ini_eS) + kmi))


def cal_likelihood(selling_array, buying_array):
    for ini_a in [0.1, 0.3, 0.5, 0.7, 0.9]:
        for ini_h in [0.1, 0.3, 0.5, 0.7, 0.9]:
            for z in [0.1, 0.3, 0.5, 0.7, 0.9]:
                time.sleep(1)
                for i in range(0, len(buying_array)):
                    ini_eB = z * selling_array[i]

                    cal_u = (buying_array[i] - ini_eB) / (ini_a * (1 - ini_h))
                    ini_eS = selling_array[i] - (ini_a * ini_h * cal_u)

                    k1 = ((-1.0) * (cal_u) - buying_array[i] * scipy.log(1 + (cal_u / ini_eB)))
                    k2 = ((-1.0) * (cal_u) - selling_array[i] * scipy.log(1 + (cal_u / ini_eS)))
                    k3 = ((-1.0) * buying_array[i] * scipy.log(1 + (cal_u / ini_eB))
                          - selling_array[i] * scipy.log(1 + (cal_u / ini_eS)))
                    kmi = max(k1, k2, k3)

                    initial_guess = [ini_a, ini_h, ini_eB, ini_eS]

                    result = minimize(f, initial_guess, args=(k1, k2,
                                                              k3, kmi, buying_array[i], selling_array[i]))
                    if result.success:
                        fitted_params = result.x
                        print(fitted_params[0])
                    else:
                        raise ValueError(result.message)


def cal_PIN():
    buying_5_tick = []
    selling_5_tick = []

    buying_5_tick.append(4035)
    buying_5_tick.append(3522)
    buying_5_tick.append(4073)
    buying_5_tick.append(3154)
    buying_5_tick.append(9556)

    selling_5_tick.append(1840)
    selling_5_tick.append(2827)
    selling_5_tick.append(4095)
    selling_5_tick.append(2602)
    selling_5_tick.append(2230)

    cal_likelihood(selling_5_tick, buying_5_tick)

I expected values with 0 < α < 1 and 0 < δ < 1, but something is wrong.

python scipy minimize quantitative-finance mle
1 Answer

Well, as is already obvious from the error you raise yourself, the minimization fails with Warning: Desired error not necessarily achieved due to precision loss.

From a scipy issue:

This warning occurs when the line search cannot find a step size that satisfies both the Wolfe conditions and the Polak-Ribiere descent condition within a certain number of iterations.
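You can see this happening in your own run by inspecting what minimize returns instead of raising immediately. Roughly, reusing the variables from your cal_likelihood loop (only the standard OptimizeResult fields are used here):

result = minimize(f, initial_guess, args=(k1, k2, k3, kmi,
                                          buying_array[i], selling_array[i]))
if not result.success:
    print(result.message)  # "Desired error not necessarily achieved ..."
    print(result.x)        # the point where the line search stalled
    print(result.jac)      # gradient at that point; very large entries are
                           # the symptom of the problem described below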

The outcome of your minimization does not appear to be bounded in any way, since your objective function is just some value multiplied by -1. This leads to rather large derivatives and possibly to an ill-conditioned Hessian, and such an ill-conditioned matrix then makes the line search fail. One option is to change the objective's return value to

return 1 / (ini_a * ini_h * scipy.exp(k1 - kmi)
            + ini_a * (1 - ini_h) * scipy.exp(k2 - kmi)
            + (1 - ini_a) * scipy.exp(k3 - kmi)
            + (buying_array * scipy.log(ini_eB + ini_h)
               + selling_array * scipy.log(ini_eS + ini_h)
               - (ini_eB + ini_eS) + kmi))

This leads to results within the desired range 0 < value < 1.

If this is not the optimal solution for your problem, try changing the solver; you can find the available options in the documentation.
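For example, as a sketch only (not part of your original code): if you want to keep the -1 * log-likelihood objective, you can hand minimize a solver that accepts bounds, so that α and δ stay inside (0, 1) and the intensity parameters stay positive. The bounds must be listed in the same order as initial_guess:

bounds = [(1e-6, 1 - 1e-6),   # alpha
          (1e-6, 1 - 1e-6),   # delta
          (1e-6, None),       # first intensity parameter
          (1e-6, None)]       # second intensity parameter
result = minimize(f, initial_guess,
                  args=(k1, k2, k3, kmi, buying_array[i], selling_array[i]),
                  method='L-BFGS-B', bounds=bounds)

L-BFGS-B, TNC and SLSQP, among others, accept the bounds argument.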

Some additional programming tips: you can use itertools.product to avoid nested loops like these, and instead of appending each value one at a time you can simply declare the list with its contents.

Here is your code with these suggestions applied (note that recent SciPy versions no longer provide scipy.exp / scipy.log, so you may have to use numpy.exp / numpy.log there instead):

import time
import scipy
from scipy.optimize import minimize
import itertools

def f(params, *args):
    # args={k1, k2, k3, kmi, buying_array[i], selling_array[i]}
    k1 = args[0]
    k2 = args[1]
    k3 = args[2]
    kmi = args[3]
    buying_array = args[4]
    selling_array = args[5]

    ini_a, ini_h, ini_eS, ini_eB = params
    return 1 / (ini_a * ini_h * scipy.exp(k1 - kmi)
                + ini_a * (1 - ini_h) * scipy.exp(k2 - kmi)
                + (1 - ini_a) * scipy.exp(k3 - kmi)
                + (buying_array * scipy.log(ini_eB + ini_h)
                   + selling_array * scipy.log(ini_eS + ini_h)
                   - (ini_eB + ini_eS) + kmi))


def cal_likelihood(selling_array, buying_array):
    list_iteration = [0.1, 0.3, 0.5, 0.7, 0.9]
    for (ini_a, ini_h, z) in itertools.product(list_iteration, repeat=3):
        time.sleep(1)
        for i in range(0, len(buying_array)):
            ini_eB = z * selling_array[i]

            cal_u = (buying_array[i] - ini_eB) / (ini_a * (1 - ini_h))
            ini_eS = selling_array[i] - (ini_a * ini_h * cal_u)

            k1 = ((-1.0) * (cal_u) - buying_array[i] * scipy.log(1 + (cal_u / ini_eB)))
            k2 = ((-1.0) * (cal_u) - selling_array[i] * scipy.log(1 + (cal_u / ini_eS)))
            k3 = ((-1.0) * buying_array[i] * scipy.log(1 + (cal_u / ini_eB))
                  - selling_array[i] * scipy.log(1 + (cal_u / ini_eS)))
            kmi = max(k1, k2, k3)

            initial_guess = [ini_a, ini_h, ini_eB, ini_eS]

            result = minimize(f, initial_guess, args=(k1, k2,
                                                      k3, kmi, buying_array[i], selling_array[i]))
            if result.success:
                fitted_params = result.x
                print(fitted_params[0])
            else:
                raise ValueError(result.message)


def cal_PIN():
    buying_5_tick = [4035, 3522, 4073, 3154, 9556]
    selling_5_tick = [1840, 2827, 4095, 2602, 2230]

    cal_likelihood(selling_5_tick, buying_5_tick)

cal_PIN()