Write to HDF5 and shuffle big arrays of data

I have downloaded Caltech101. Its structure is:

#Caltech101 dir
    #class1 dir
        #images of class1 jpgs
    #class2 dir
        #images of class2 jpgs
    ...
    #class100 dir
        #images of class100 jpgs

My problem is that I can't hold two np arrays x and y, of shapes (9144, 240, 180, 3) and (9144,), in memory at once. So my solution was to over-allocate an h5py dataset, load the data in 2 chunks and write them to the file one after the other. To be precise:

from __future__ import print_function
import os
import glob
from scipy.misc import imread, imresize
from sklearn.utils import shuffle
import numpy as np
import h5py
from time import time


def load_chunk(images_dset, labels_dset, chunk_of_classes, counter, type_key, prev_chunk_length):
    # getting images and processing
    xtmp = []
    ytmp = []
    for label in chunk_of_classes:
        img_list = sorted(glob.glob(os.path.join(dir_name, label, "*.jpg")))
        for img in img_list:
            img = imread(img, mode='RGB')
            img = imresize(img, (240, 180))
            xtmp.append(img)
            ytmp.append(label)
        print(label, 'done')

    x = np.concatenate([arr[np.newaxis] for arr in xtmp])
    y = np.array(ytmp, dtype=type_key)
    print('x: ', type(x), np.shape(x), 'y: ', type(y), np.shape(y))

    # writing to dataset
    a = time()
    images_dset[prev_chunk_length:prev_chunk_length+x.shape[0], :, :, :] = x
    print(labels_dset.shape)
    print(y.shape, y.shape[0])
    print(type(y), y.dtype)
    print(prev_chunk_length)
    labels_dset[prev_chunk_length:prev_chunk_length+y.shape[0]] = y
    b = time()
    print('Chunk', counter, 'written in', b-a, 'seconds')
    return prev_chunk_length+x.shape[0]


def write_to_file(remove_DS_Store):
    if os.path.isfile('caltech101.h5'):
        print('File exists already')
        return
    else:
        # the name of each dir is the name of a class
        classes = os.listdir(dir_name)
        if remove_DS_Store:
            classes.pop(0)  # removes .DS_Store; may not be needed on other systems

        # need the dtype of y in order to initialize h5 dataset
        # build a fixed-length byte-string dtype, e.g. 'S22' for the longest class name
        key_type_y = 'S' + str(len(max(classes, key=len)))
        classes = np.array(classes, dtype=key_type_y)

        # number of chunks in which the dataset must be divided
        nb_chunks = 2
        nb_chunks_loaded = 0
        prev_chunk_length = 0
        # open file and allocating a dataset
        f = h5py.File('caltech101.h5', 'a')
        imgs = f.create_dataset('images', shape=(9144, 240, 180, 3), dtype='uint8')
        labels = f.create_dataset('labels', shape=(9144,), dtype=key_type_y)
        for class_sublist in np.array_split(classes, nb_chunks):
            # loading chunk by chunk in a function to avoid memory overhead
            prev_chunk_length = load_chunk(imgs, labels, class_sublist, nb_chunks_loaded, key_type_y, prev_chunk_length)
            nb_chunks_loaded += 1
        f.close()
        print('Images and labels saved to \'caltech101.h5\'')
    return

dir_name = '../Datasets/Caltech101'
write_to_file(remove_DS_Store=True)
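
(Note: scipy.misc.imread and scipy.misc.imresize used above have been removed from newer SciPy releases. If you are on a recent SciPy, a roughly equivalent loader, assuming Pillow is installed, could look like the sketch below; PIL's resize takes (width, height), so (180, 240) here yields arrays of shape (240, 180, 3).)

# Sketch only: Pillow-based replacement for imread(..., mode='RGB') + imresize(..., (240, 180)).
# Image.BILINEAR roughly matches scipy.misc.imresize's default interpolation.
import numpy as np
from PIL import Image

def load_image(path, size=(180, 240)):          # PIL size is (width, height)
    img = Image.open(path).convert('RGB')       # force 3-channel RGB
    img = img.resize(size, Image.BILINEAR)
    return np.asarray(img, dtype=np.uint8)      # shape (240, 180, 3)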

This works nicely, and reading it back is actually fast enough too. The problem is that I need to shuffle the dataset.

Observations:

Can you think of a way to shuffle before writing? I am also open to solutions that rethink the writing process, as long as they don't use a lot of memory.

You can shuffle the file paths before reading the image data.

Instead of shuffling the image data in memory, create a list of all file paths belonging to the dataset, then shuffle that list of file paths. Afterwards you can create the HDF5 database just as before.

For example, you can use glob to create the list of files and shuffle it:

import glob
import random

files = glob.glob('../Datasets/Caltech101/*/*.jpg')
random.shuffle(files)  # shuffles the list in place and returns None

Then you can retrieve the class label and the image name from each path:

import os

for file_path in files:
    label = os.path.basename(os.path.dirname(file_path))          # class = parent directory name
    image_id = os.path.splitext(os.path.basename(file_path))[0]   # file name without extension
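
Putting the two together, here is a rough sketch (untested) of how you could stream the shuffled paths into pre-allocated HDF5 datasets in fixed-size chunks, so only one chunk of images is ever held in memory. It reuses the question's imread/imresize calls and 240x180 RGB resize; the output filename and chunk_size are just illustrative.

import os
import glob
import random
import numpy as np
import h5py
from scipy.misc import imread, imresize

files = glob.glob('../Datasets/Caltech101/*/*.jpg')
random.shuffle(files)                      # shuffle paths, not pixel data

n = len(files)
# fixed-length byte-string dtype long enough for the longest class name
label_dtype = 'S{}'.format(max(len(os.path.basename(os.path.dirname(p))) for p in files))

with h5py.File('caltech101_shuffled.h5', 'w') as f:
    imgs = f.create_dataset('images', shape=(n, 240, 180, 3), dtype='uint8')
    labels = f.create_dataset('labels', shape=(n,), dtype=label_dtype)

    chunk_size = 1000                      # images kept in memory at once; tune to your RAM
    for start in range(0, n, chunk_size):
        chunk = files[start:start + chunk_size]
        x = np.stack([imresize(imread(p, mode='RGB'), (240, 180)) for p in chunk])
        y = np.array([os.path.basename(os.path.dirname(p)) for p in chunk], dtype=label_dtype)
        imgs[start:start + len(chunk)] = x
        labels[start:start + len(chunk)] = y
        print('wrote images', start, 'to', start + len(chunk))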