
extract images from clusters separately in kmeans python

I applied K-means clustering to a dataset of images and ended up with 5 clusters. Now I want to extract the images belonging to each cluster and save them separately. I don't know how to do this; I tried, but I could not access the images.

Here is my code:

import matplotlib.pyplot as plt
import pandas as pd
from sklearn.externals import joblib
import numpy as np
import cv2 
import sys
import pickle
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
import os 
from skimage.feature import local_binary_pattern 
# To calculate a normalized histogram
from scipy.stats import itemfreq 
from sklearn.preprocessing import normalize 
import cvutils 
import csv 
import numpy 
from matplotlib.pyplot import imshow
from PIL import Image
import time
from sklearn.cluster import KMeans

start_time=time.time()
  ############################################################################################
dir_unknown = 'UntitledFolder'
trainingSet='/home/irum/Desktop/Face-Recognition/thakarrecog/UntitledFolder/UntitledFolder1'
imageLabels='/home/irum/Desktop/Face-Recognition/thakarrecog/class_train'
path='/home/irum/Desktop/Face-Recognition/thakarrecog/Clusters'
#Create CSV File
images_names = []
SEPARATOR=" "
print"start"
'''
for (dirname, dirnames, filenames) in os.walk(dir_unknown):
    for subdirname in dirnames:
        subject_path = os.path.join(dirname, subdirname)
        for filename in os.listdir(subject_path):
            abs_path = "%s/%s" % (subject_path, filename)

            #csv_path = "%s%s%d" % (abs_path, SEPARATOR, label)
            #print "%s%s%d" % (abs_path, SEPARATOR, label)
            images_names.append("%s%s%d" % (abs_path, SEPARATOR, label))
            #print images_names
            with open('class_train1', 'w') as myfile:
                wr = csv.writer(myfile, delimiter=' ', doublequote=False, quotechar=None, lineterminator='\r\n', skipinitialspace=True)
                wr.writerow(imageLabels)
            label = label + 1
'''
# Store the path of training images in train_images
train_images = cvutils.imlist(trainingSet)
print "Total Images",len(train_images)

# Dictionary containing image paths as keys and corresponding label as value
train_dic = {}
with open('/home/irum/Desktop/Face-Recognition/thakarrecog/class_train', 'rb') as csvfile:
    reader = csv.reader(csvfile, delimiter=' ')
    for row in reader:
        train_dic[row[0]] = row[1]

# List for storing the LBP Histograms, address of images and the corresponding label 
X_test = []
X_name = []
y_test = []

print"Calculating LBP Histograms"
h1 = time.time()
# For each image in the training set calculate the LBP histogram
# and update X_test, X_name and y_test
for train_image in train_images:
    # Read the image
    im = cv2.imread(train_image)

    # Convert to grayscale as LBP works on grayscale image
    im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)

    radius = 3
    # Number of points to be considered as neighbours
    no_points = 8 * radius

    # Uniform LBP is used
    lbp = local_binary_pattern(im_gray, no_points, radius, method='uniform')

    # Calculate the histogram
    x = itemfreq(lbp.ravel())


    # Normalize the histogram
    hist = x[:, 1]/sum(x[:, 1])


    # Append image path to X_name
    X_name.append(train_image)

    # Append histogram to X_test
    X_test.append(hist)

    # Append class label in y_test
    #y_test.append(train_dic[os.path.split(images_names)[1]])

h2 = time.time()
t = (h2 - h1)
print"Time taken by LBPH",t

# Dump the  data
joblib.dump((X_name, X_test), "lbp.pkl", compress=3)

p1 = time.time()
print"Applying PCA on LBP Histograms"
X_test = np.array(X_test)
pca = PCA(n_components=26)
pca.fit(X_test)
pca_activations = pca.transform(X_test)
p2 = time.time()
t = (p2 - p1)
print"Time taken by PCA",t

t1 = time.time()
print"Applying t-SNE on PCA"
# then run the PCA-projected activations through t-SNE to get our final embedding
X = np.array(pca_activations)
tsne = TSNE(n_components=2, learning_rate=500, perplexity=50, verbose=2, angle=0.2, early_exaggeration=7.0).fit_transform(X)
print "t-SNE Type", type(tsne)
print"tsne",tsne
t2 = time.time()
t = (t2 - t1)
print"Time taken by t-SNE",t

n1 = time.time()
print"normalize t-sne points to {0,1}"
tx, ty = tsne[:,0], tsne[:,1]
tx = (tx-np.min(tx)) / (np.max(tx) - np.min(tx))
ty = (ty-np.min(ty)) / (np.max(ty) - np.min(ty))

n2 = time.time()
t = (n2 - n1)
print "Normalization completed in time",t

width = 5000
height = 5000
max_dim = 100

print "displaying"
full_image = Image.new('RGB', (width, height))
for img, x, y in zip(X_name, tx, ty):
    #print "for loop"
    tile = Image.open(img)
    rs = max(1, tile.width/max_dim, tile.height/max_dim)
    tile = tile.resize((tile.width/rs, tile.height/rs), Image.ANTIALIAS)
    full_image.paste(tile, (int((width-max_dim)*x), int((height-max_dim)*y)))
full_image.save("myTSNE.png")

#matplotlib.pyplot.figure(figsize = (12,12))
#plt.imshow(full_image)

print "K-Means clustering"
#Convert the t-SNE coordinates to float32
images = np.asarray(tsne, np.float32)
N = len(images)
images = images.reshape(N,-1)

#using kmeans clustering with 5 clusters
kmeans = KMeans(n_clusters=5)

#passing images to kmeans 
kmeans.fit(images)

centroids = kmeans.cluster_centers_
labels = kmeans.labels_

colors = 10*['r.','g.','b.','c.','k.','y.','m.']

#I want to move each cluster to a separate folder (5 clusters means 5 folders)

for i in range(len(images)):
    print("coordinate:",images[i], "label:", labels[i])
    plt.plot(images[i][0], images[i][1], colors[labels[i]], markersize = 10)

    img = cv2.convertScaleAbs(images[i])
    print "Images Type", img.dtype

    pin = sorted([int(n[:n.find('.')]) for n in os.listdir(path)
                  if n[0] != '.'] + [0])[-1] + 1
    cv2.imwrite('%s/%s.png' % (path, pin), img)




plt.scatter(centroids[:, 0],centroids[:, 1], marker = "x", s=150, linewidths = 5, zorder = 10)

plt.show()

end_time=time.time()
total_time = end_time - start_time
print "Total execution time in seconds", total_time

This is the point where I try to extract the clusters, but it fails. I need the images of each cluster separately as output so that I can process them further.

#I want to move each cluster to a separate folder (5 clusters means 5 folders)

for i in range(len(images)):
    print("coordinate:", images[i], "label:", labels[i])
    plt.plot(images[i][0], images[i][1], colors[labels[i]], markersize = 10)

    img = cv2.convertScaleAbs(images[i])
    print "Images Type", img.dtype

I want the images of the red cluster in one place, the images of the blue cluster in another, and so on, actually in different folders: 5 clusters, 5 folders.

I have accessed the images like this:

for i,j in zip(images, labels):
    if labels[j] == 1:
        #print "Images Type", images.dtype
        img = images[i]
        pin=sorted([int(n[:n.find('.')]) for n in os.listdir(path)
                if n[0]!='.' ]+[0])[-1] + 1
        cv2.imwrite('%s/%s.png' % (path, pin), img)

But the images I get are distorted and very small. My output looks like this:

[example of the distorted output images]

From your code, it looks like images holds your images and labels is an array of the same length containing the class labels.

If you want to get all images of the class called myclass, simply do:

images_in_myclass = [i for i, j in zip(images, labels) if j == 'myclass']

zip lets you iterate over the two arrays element by element, and you keep only the images whose label satisfies the condition.
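
For example, with the variables from your code (assuming X_name still holds the original image paths in the same order as kmeans.labels_, and labels contains the integer cluster ids 0 to 4), the paths of the images in cluster 0 would be:

paths_in_cluster_0 = [p for p, lab in zip(X_name, labels) if lab == 0]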

In your code, images does not contain pictures.

It is an array of coordinates:

images = np.asarray(tsne, np.float32)

Writing an array of coordinates to an image file will of course produce tiny glitched images like the ones you show. If you want the original images, copy the original image files instead.
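
A minimal sketch of that idea, assuming X_name still holds the original file paths in the same order as kmeans.labels_ (the output_root path is taken from your code; the cluster_%d folder names are just an example):

import os
import shutil

# One sub-folder per cluster under the existing Clusters path (5 clusters -> 5 folders)
output_root = '/home/irum/Desktop/Face-Recognition/thakarrecog/Clusters'

for img_path, label in zip(X_name, labels):
    cluster_dir = os.path.join(output_root, 'cluster_%d' % label)
    if not os.path.exists(cluster_dir):
        os.makedirs(cluster_dir)
    # Copy the original image file instead of writing the t-SNE coordinates
    shutil.copy(img_path, cluster_dir)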

Note that t-SNE is a visualization technique. Using this visualization for clustering may not be a good idea; as Van der Maaten and Hinton themselves say: "it is unclear how t-SNE performs on the more general dimensionality reduction tasks". For clustering it is probably wise to work on the original data (and with a better algorithm than k-means), and to use t-SNE only to visualize and verify the results.
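
If you want to keep most of your current pipeline, a hedged sketch of that advice: run k-means on the PCA-reduced LBP features (pca_activations from your code) instead of on the 2-D t-SNE points, and use the t-SNE coordinates (tx, ty) only to colour the plot:

from sklearn.cluster import KMeans
import matplotlib.pyplot as plt

# Cluster on the PCA-reduced LBP features, not on the 2-D t-SNE embedding
kmeans = KMeans(n_clusters=5, random_state=0)
labels = kmeans.fit_predict(pca_activations)

# Use the t-SNE embedding only to visualise and sanity-check the clusters
plt.scatter(tx, ty, c=labels, s=10)
plt.title('Clusters from PCA features, plotted on the t-SNE embedding')
plt.show()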