为什么我每次运行自动编码器时,编码后的数据帧中都会得到不稳定的值?

Why do I get unstable values in an encoded dataframe for each time I run an autoencoder?

我试图在使用 KMeans 时,通过肘部法则和轮廓系数在我的数据上找到最佳聚类数。不过,我是在经过降维的数据上测试这些方法的。

如果我多次尝试 PCA,我每次都会得到相同的肘法和轮廓图。但是,如果我出于相同目的尝试使用具有神经网络结构的编码器,我每次都会得到不同的图形。因此,我没有信心使用这种编码器技术,因为它会产生不同的最佳簇数。

为什么会这样?即使我对数据进行标准化,结果也会不断变化。

如何正确使用这种编码器技术?我知道我可以为此简单地选择 PCA,但我想了解并查看我是否做错了什么。

这是我的代码,您可以多次运行来查看我所说的现象。我以鸢尾花数据集为例:

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn import datasets
from sklearn.metrics import silhouette_score, silhouette_samples

import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import backend as K

# Load the iris dataset and wrap its 4 numeric feature columns in a
# DataFrame (integer column labels 0-3); class labels are not used here.
iris = datasets.load_iris()
X = pd.DataFrame(iris.data)

def autoencoding(data):
    """Train a dense autoencoder on `data` and return the fitted model.

    The network is input -> relu(n_input) -> relu(2) -> relu(n_input);
    the 2-unit layer named 'central_layer' is the encoding that is
    extracted later by `encoded`.

    NOTE(review): results differ between runs because layer weights are
    randomly initialised on every call — seed TensorFlow/NumPy before
    calling for reproducible encodings.
    """
    n_input_layer = data.shape[1]
    n_encoding_layer = 2
    n_output_layer = n_input_layer

    # AUTOENCODER
    autoencoder = tf.keras.models.Sequential([
        # ENCODER
        Dense(n_input_layer, input_shape = (n_input_layer,), activation = 'relu'),   # Input layer

        # CENTRAL LAYER — named so it can be looked up when extracting the code
        Dense(n_encoding_layer, activation = 'relu', name = 'central_layer'),

        # DECODER — relu output assumes non-negative features (true for iris);
        # use a linear activation for general, possibly-negative data.
        Dense(n_output_layer, activation = 'relu')  # Output layer
    ])

    n_epochs = 2000
    loss = tf.keras.losses.MeanSquaredError()
    optimizer = tf.optimizers.Adam(learning_rate = 0.001, decay = 0.0001, clipvalue = 0.5)

    loss_history = []  # save loss improvement

    # BUG FIX: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # use the concrete float32 dtype that Keras layers compute in anyway.
    data = np.asarray(data, dtype=np.float32)

    for epoch in range(n_epochs):

        # Forward pass recorded on the tape so gradients can be computed.
        with tf.GradientTape() as tape:
            current_loss = loss(autoencoder(data), data)

        gradients = tape.gradient(current_loss, autoencoder.trainable_variables)    # get the gradient of the loss function
        optimizer.apply_gradients(zip(gradients, autoencoder.trainable_variables))  # update the weights

        loss_history.append(current_loss.numpy())  # save current loss in its history

        # show loss improvement every 200 epochs
        if (epoch+1) % 200 == 0:
            print(str(epoch+1) + '.\tLoss: ' + str(current_loss.numpy()))

    print('\nEncoding complete')
    return autoencoder

# Train the autoencoder on the iris features; keeps the fitted model.
X_autoencoded = autoencoding(X)

# ENCODER EXTRACTION
def encoded(autoencoder, data):
    """Return the 2-D encoding of `data` (a DataFrame) as a new DataFrame.

    Builds a Keras sub-model that shares the trained weights and ends at
    the layer named 'central_layer'. This replaces the TF1-style
    `K.function` call, which is unreliable under TF2 eager execution, and
    looks the layer up by name instead of by brittle positional index.
    """
    # Sub-model: original input -> output of the 2-unit central layer.
    extract_encoded_data = tf.keras.Model(
        inputs = autoencoder.input,
        outputs = autoencoder.get_layer('central_layer').output
    )
    # extract encoded dataframe
    encoded_dataframe = extract_encoded_data.predict(data.values)
    encoded_data = pd.DataFrame(encoded_dataframe)
    return encoded_data

X_encoded = encoded(X_autoencoded, X)

# ELBOW METHOD AND SILHOUETTE SCORE
inertia =[]
sil =[]

# Fit KMeans for k = 2..13 on the encoded features. random_state fixes
# KMeans itself — the run-to-run variance comes from the autoencoder.
for k in range(2,14):
    kmeans_rand = KMeans(n_clusters=k, init='k-means++', random_state=42)
    kmeans_rand.fit(X_encoded)
    y_pred = kmeans_rand.predict(X_encoded)

    inertia.append(kmeans_rand.inertia_)
    sil.append((k, silhouette_score(X_encoded, y_pred)))

# NOTE(review): y_pred here is the labelling left over from the LAST loop
# iteration (k = 13), so these per-sample scores describe k = 13 only.
sil_samples = silhouette_samples(X_encoded, y_pred)

fig, ax = plt.subplots(1, 2, figsize=(12,4))
ax[0].plot(range(2,14), inertia)
ax[0].set_title('Elbow Method')
ax[0].set_xlabel('Number of clusters')
ax[0].set_ylabel('Inertia')

x_sil = [x[0] for x in sil]
y_sil = [x[1] for x in sil]
ax[1].plot(x_sil, y_sil)
ax[1].set_xlabel('Number of Clusters')
ax[1].set_ylabel('Silhouette Score')  # fixed typo: was 'Silhouetter'
ax[1].set_title('Silhouette Score Curve')

尝试在代码顶部使用以下几行来设置随机种子:

# BUG FIX: os and random are used below but were never imported.
import os
import random

tf.random.set_seed(33)
# NOTE(review): PYTHONHASHSEED must be set before the interpreter starts
# to affect str hashing in the current process; setting it here only
# influences subprocesses.
os.environ['PYTHONHASHSEED'] = str(33)
np.random.seed(33)
random.seed(33)

# Force single-threaded execution so floating-point reduction order —
# another source of run-to-run differences — is stable.
session_conf = tf.compat.v1.ConfigProto(
    intra_op_parallelism_threads=1,
    inter_op_parallelism_threads=1
)
sess = tf.compat.v1.Session(
    graph=tf.compat.v1.get_default_graph(),
    config=session_conf
)
tf.compat.v1.keras.backend.set_session(sess)

我使用的是 tf.keras(TF 2.2),没有 GPU,这样每次运行都能获得相同的结果。

https://colab.research.google.com/drive/1S9iB7AsLLkdTAY827eOBN_VRRi2EVWRA?usp=sharing