How to calculate a probability distribution from an array with scipy
My actual goal is to calculate the difference between two histograms. For this I would like to use the Kullback–Leibler divergence. In the thread Calculating KL Divergence in Python it was said that Scipy's entropy function will calculate the KL divergence. For this I need a probability distribution of my datasets. I tried to follow the answers and instructions given in two other threads, including How do I calculate PDF (probability density function) in Python?. Unfortunately, I always get an error.
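For reference, a minimal sketch (with made-up probability vectors, purely for illustration) of the two-argument form of scipy.stats.entropy, which computes KL(p || q):
import numpy as np
from scipy.stats import entropy
#Two made-up discrete probability distributions over the same bins
p = np.array([0.36, 0.48, 0.16])
q = np.array([0.30, 0.50, 0.20])
#With a second argument, entropy computes KL(p || q) = sum(p * log(p / q))
print(entropy(p, q))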
Here you can see my code, in which I subdivide the data into three parts (training, validation, and test datasets) with the aim of calculating the pairwise difference between the data distributions of these three sets.
import scipy
from scipy.stats import norm
from scipy.stats import rv_histogram
import numpy as np
import pandas as pd
#Reading the data
df = pd.read_csv("C:/Users/User1/Desktop/TestData_Temperatures.csv", sep=';')
#Build training, validation and test data set
timeslot_x_train_end = int(len(df)* 0.7)
timeslot_x_valid_end = int(len(df)* 0.9)
data_histogram = df[['temperatures']].values
data_train_histogram = data_histogram[:timeslot_x_train_end]
data_valid_histogram = data_histogram[timeslot_x_train_end:timeslot_x_valid_end]
data_test_histogram = data_histogram[timeslot_x_valid_end:]
#Make histogram out of numpy array
histogram_train = rv_histogram(np.histogram(data_train_histogram, bins='auto'))
histogram_valid = rv_histogram(np.histogram(data_valid_histogram, bins='auto'))
histogram_test = rv_histogram(np.histogram(data_test_histogram, bins='auto'))
#Make probability distribution out of the histogram
pdfs_train = norm.cdf(histogram_train, histogram_train.mean(), histogram_train.std())
pdfs_valid = norm.cdf(histogram_valid, histogram_valid.mean(), histogram_valid.std())
pdfs_test = norm.cdf(histogram_test, histogram_valid.mean(), histogram_valid.std())
#Calculate the entropy between the different datasets
entropy_train_valid = scipy.special.rel_entr(pdfs_train, pdfs_valid)
entropy_train_test = scipy.special.rel_entr(pdfs_train, pdfs_test)
entropy_valid_test = scipy.special.rel_entr(pdfs_valid, pdfs_test)
#Calculate the Kullback–Leibler divergence between the different datasets
kl_div_train_valid = np.sum(entropy_train_valid)
kl_div_train_test = np.sum(entropy_train_test)
kl_div_valid_test = np.sum(entropy_valid_test)
#Print the values of the Kullback–Leibler divergence
print(f"Kullback–Leibler divergence between training and validation dataset: {kl_div_train_valid}")
print(f"Kullback–Leibler divergence between training and test dataset: {kl_div_train_test}")
print(f"Kullback–Leibler divergence between validation and test dataset: {kl_div_valid_test}")
In this setup I receive an error raised by the line pdfs_train = norm.cdf(histogram_train, histogram_train.mean(), histogram_train.std()).
Here you can see the test dataset TestData. Do you know why I get this error, and how I can calculate the probability distribution of my arrays (and eventually the Kullback–Leibler divergence)?
Sidenote: Do you have any suggestions? I would appreciate every comment.
First, the reason for the error: norm.cdf expects a numeric array as its first argument, but histogram_train is an rv_histogram distribution object, not data.
Now, under the assumption that the sample is large enough to capture the distribution of the population, the histogram of a sample approximates the pdf of the underlying distribution. So when you call histogram_train = rv_histogram(np.histogram(data_train_histogram, bins='auto')), it generates a distribution object defined by that histogram. This object has a .pdf method to evaluate the pdf and an .rvs method to generate values that follow this distribution (a short sketch of both methods follows the code block below). To calculate the Kullback–Leibler divergence between two distributions, you can therefore do the following:
import scipy.special
import numpy as np
import pandas as pd
from scipy.stats import norm, rv_histogram
#Reading the data
df = pd.read_csv("C:/Users/User1/Desktop/TestData_Temperatures.csv", sep=';')
#Build training and validation data set
timeslot_x_train_end = int(len(df) * 0.7)
timeslot_x_valid_end = int(len(df) * 0.9)
data = df[['temperatures']].values
data_train = data[:timeslot_x_train_end]
data_valid = data[timeslot_x_train_end:timeslot_x_valid_end]
#Make distribution objects of the histograms
histogram_dist_train = rv_histogram(np.histogram(data_train, bins='auto'))
histogram_dist_valid = rv_histogram(np.histogram(data_valid, bins='auto'))
#Evaluate both pdfs on one common grid; rel_entr compares the densities
#point by point, so both must be evaluated at the same points
X = np.linspace(max(np.min(data_train), np.min(data_valid)),
                min(np.max(data_train), np.max(data_valid)), 1000)
rvs_train = histogram_dist_train.pdf(X)
rvs_valid = histogram_dist_valid.pdf(X)
#Calculate the Kullback–Leibler divergence between the datasets:
#sum(p * log(p / q)) * dx is a Riemann-sum approximation of the integral
entropy_train_valid = scipy.special.rel_entr(rvs_train, rvs_valid)
kl_div_train_valid = np.sum(entropy_train_valid) * (X[1] - X[0])
#Print the value of the Kullback–Leibler divergence
print(f"Kullback–Leibler divergence between training and validation dataset: {kl_div_train_valid}")
On the other hand, if you assume that the data follows a normal distribution, you would instead do the following:
#Build training and validation data set
timeslot_x_train_end = int(len(df) * 0.7)
timeslot_x_valid_end = int(len(df) * 0.9)
data = df[['temperatures']].values
data_train = data[:timeslot_x_train_end]
data_valid = data[timeslot_x_train_end:timeslot_x_valid_end]
#Make normal distribution objects from data mean and standard deviation
norm_dist_train = norm(data_train.mean(), data_train.std())
norm_dist_valid = norm(data_valid.mean(), data_valid.std())
#Evaluate both pdfs on one common grid covering both datasets
X = np.linspace(min(np.min(data_train), np.min(data_valid)),
                max(np.max(data_train), np.max(data_valid)), 1000)
rvs_train = norm_dist_train.pdf(X)
rvs_valid = norm_dist_valid.pdf(X)
#Calculate the Kullback–Leibler divergence between the datasets
entropy_train_valid = scipy.special.rel_entr(rvs_train, rvs_valid)
kl_div_train_valid = np.sum(entropy_train_valid) * (X[1] - X[0])
#Print the value of the Kullback–Leibler divergence
print(f"Kullback–Leibler divergence between training and validation dataset: {kl_div_train_valid}")