Variable input function scipy.curve_fit
I am working on peak deconvolution of my experimental data, and I want to write a Python script in which I can easily change the equations used for the non-linear curve fitting / peak deconvolution. With Gaussian curves and a linear offset, scipy.optimize.curve_fit works with the following code:
def Combined(x, *params):
    off = Linear(x, params[0], params[1])
    peak1 = Gaussian(x, params[2], params[3], params[4])
    peak2 = Gaussian(x, params[5], params[6], params[7])
    peak3 = Gaussian(x, params[8], params[9], params[10])
    return off + peak1 + peak2 + peak3

popt, pcov = opt.curve_fit(Combined, data[10][0], data[10][1], method='lm', check_finite=True, p0=[0.1, 0.1, 115, 508.33, 7.1, 130, 508.33, 7.1, 165.84, 508.33, 7.1])
All of the equations are already defined as functions:
def ZeroOrder(x, a):
    return a

def Linear(x, a, b):
    return a + b*x

def SecondOrder(x, a, b, c):
    return a + b*x + c*x**2
I would like to create a function Combine(x, baseline='ZeroOrder', peak1='Gaussian', peak2='Gaussian', peak3='Gaussian') in which I can easily assign different peak functions, without having to write a dedicated function for every combination. However, as I understand it, curve_fit is quite strict and requires the model function to have the signature Combined(x, *params). How can I change my code so that it works this way?
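One way to get that flexibility while keeping the f(x, *params) signature that curve_fit expects is to build the combined model dynamically. The sketch below only illustrates that idea and is not part of the original code: make_combined and N_PARAMS are hypothetical names, the component definitions are illustrative, and it assumes each component takes x followed by its own coefficients in order.

import numpy as np
from scipy.optimize import curve_fit

def ZeroOrder(x, a):
    # broadcast the constant to the shape of x
    return a + 0 * x

def Linear(x, a, b):
    return a + b * x

def Gaussian(x, scale, mu, sigma):
    return scale * np.exp(-0.5 * ((x - mu) / sigma) ** 2)

# number of coefficients each component consumes (hypothetical helper table)
N_PARAMS = {ZeroOrder: 1, Linear: 2, Gaussian: 3}

def make_combined(*components):
    """Return a model f(x, *params) that sums the given components,
    handing each one its slice of the flat parameter vector in order."""
    def combined(x, *params):
        y = np.zeros_like(np.asarray(x, dtype=float))
        i = 0
        for comp in components:
            n = N_PARAMS[comp]
            y = y + comp(x, *params[i:i + n])
            i += n
        return y
    return combined

# e.g. a linear baseline plus three Gaussian peaks (2 + 3*3 = 11 parameters):
model = make_combined(Linear, Gaussian, Gaussian, Gaussian)
# popt, pcov = curve_fit(model, xdata, ydata, p0=initial_guesses)  # p0 is required since the model takes *params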
For this kind of problem I would use something more generic, such as fmin from scipy.optimize.
Below is an example of how to build a generic regressor for experimental data.
I created an abstract class just to define the interface of the shape functions, which helps in building a more generic approach.
In the example below the coefficients found by the solver are not the same as those of the original functions, but over the chosen domain the resulting curve satisfies the convergence criterion; this is most likely because the model I built has a large number of degrees of freedom.
from abc import ABC, abstractmethod
from typing import List
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import fmin
class ShapeFunction(ABC):
    def __init__(self, params_count: int):
        self.params_count = params_count
        self.params = [0] * params_count

    @property
    def params(self):
        return self.__params

    @params.setter
    def params(self, params: List[float]):
        if len(params) != self.params_count:
            raise ValueError(f"params count must be {self.params_count}")
        self.__params = params

    @abstractmethod
    def evaluate(self, t: List[float]) -> List[float]:
        pass
class Gaussian(ShapeFunction):
    def __init__(self):
        params_count = 3
        super().__init__(params_count)

    def evaluate(self, t: List[float]) -> List[float]:
        return self.Gaussian(t, *self.params)

    @staticmethod
    def Gaussian(x, mu, sigma, scale):
        return scale/(sigma*np.sqrt(2*np.pi))*np.exp(-0.5*((x-mu)/sigma)**2)
class ZeroOrder(ShapeFunction):
    def __init__(self):
        params_count = 1
        super().__init__(params_count)

    def evaluate(self, t: List[float]) -> List[float]:
        return self.ZeroOrder(t, *self.params)

    @staticmethod
    def ZeroOrder(x, a):
        return a
class Linear(ShapeFunction):
    def __init__(self):
        params_count = 2
        super().__init__(params_count)

    def evaluate(self, t: List[float]) -> List[float]:
        return self.Linear(t, *self.params)

    @staticmethod
    def Linear(x, a, b):
        return a + b*x
class Quadratic(ShapeFunction):
    def __init__(self):
        params_count = 3
        super().__init__(params_count)

    def evaluate(self, t: List[float]) -> List[float]:
        return self.Quadratic(t, *self.params)

    @staticmethod
    def Quadratic(x, a, b, c):
        return a + b*x + c*x**2
f1 = Gaussian()
f1.params = [0, 1, 10]
f2 = ZeroOrder()
f2.params = [1]
f3 = Linear()
f3.params = [1, 1]
f4 = Quadratic()
f4.params = [1, 1, 1]
shape_functions = [f1, f2, f3, f4]
coefs_count = sum([func.params_count for func in shape_functions])
original_coefs = []
for func in shape_functions:
    original_coefs.extend(func.params)
x = np.linspace(-5, 5, 100)
sample = f1.evaluate(x) + f2.evaluate(x) + f3.evaluate(x) + f4.evaluate(x)
def cost_function(coefs: List[float], *params):
    funcs: List[ShapeFunction] = params[0]
    parametric_values: List[float] = params[1]
    obj_y: List[float] = params[2]
    # accumulate the model as a numpy array so scalars and arrays add element-wise
    y = np.zeros(len(parametric_values))
    for func in funcs:
        func.params = list(coefs[:func.params_count])
        y = y + func.evaluate(parametric_values)
        coefs = coefs[func.params_count:]
    return np.sum((y - obj_y)**2)
x0 = [1] * coefs_count
solution = fmin(cost_function, x0, args=(shape_functions, x, sample))
for sol, coef in zip(solution, original_coefs):
    print(f"Original: {coef:.2f} -> solution: {sol:.2f}")

# assign the optimised coefficients back to the shape functions before evaluating
coefs = list(solution)
for func in shape_functions:
    func.params = coefs[:func.params_count]
    coefs = coefs[func.params_count:]

# built-in sum broadcasts correctly even when a component (ZeroOrder) returns a scalar
y_sol = sum(func.evaluate(x) for func in shape_functions)
plt.plot(x, sample, label="Original")
plt.plot(x, y_sol, label="Solution", alpha=0.5, marker=".")
plt.legend()
plt.show()
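If you would rather stay with scipy.optimize.curve_fit, as in the original question, the same ShapeFunction objects can be wrapped in a model with the f(x, *params) signature it expects. The adapter below is only a sketch reusing shape_functions, x, sample and coefs_count from the example above; make_curve_fit_model is an illustrative name, not an existing API.

from scipy.optimize import curve_fit

def make_curve_fit_model(funcs):
    """Build f(x, *params) that distributes the flat parameter vector
    over the given ShapeFunction instances and sums their outputs."""
    def model(x, *params):
        y = np.zeros_like(np.asarray(x, dtype=float))
        i = 0
        for func in funcs:
            func.params = list(params[i:i + func.params_count])
            y = y + func.evaluate(x)
            i += func.params_count
        return y
    return model

model = make_curve_fit_model(shape_functions)
# p0 is required because the model takes *params, so curve_fit cannot infer the parameter count
popt, pcov = curve_fit(model, x, sample, p0=[1] * coefs_count)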