OpenImaj - identify a shape

My goal is to identify a shape after training a classifier, similar to what is done in chapter 12 of The OpenIMAJ Tutorial (http://openimaj.org/tutorial/classification101.html). Chapter 12 uses the Caltech101 class, which does not help me because I want to train the classifier with my own set of images. I created this working code based on chapter 12:

package com.mycompany.video.analytics;

import de.bwaldvogel.liblinear.SolverType;
import org.openimaj.data.DataSource;
import org.openimaj.data.dataset.Dataset;
import org.openimaj.data.dataset.GroupedDataset;
import org.openimaj.data.dataset.ListDataset;
import org.openimaj.data.dataset.VFSGroupDataset;
import org.openimaj.experiment.dataset.sampling.GroupSampler;
import org.openimaj.experiment.dataset.sampling.GroupedUniformRandomisedSampler;
import org.openimaj.experiment.dataset.split.GroupedRandomSplitter;
import org.openimaj.experiment.evaluation.classification.ClassificationEvaluator;
import org.openimaj.experiment.evaluation.classification.ClassificationResult;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMResult;
import org.openimaj.feature.DoubleFV;
import org.openimaj.feature.FeatureExtractor;
import org.openimaj.feature.SparseIntFV;
import org.openimaj.feature.local.data.LocalFeatureListDataSource;
import org.openimaj.feature.local.list.LocalFeatureList;
import org.openimaj.image.FImage;
import org.openimaj.image.ImageUtilities;
import org.openimaj.image.feature.dense.gradient.dsift.ByteDSIFTKeypoint;
import org.openimaj.image.feature.dense.gradient.dsift.DenseSIFT;
import org.openimaj.image.feature.dense.gradient.dsift.PyramidDenseSIFT;
import org.openimaj.image.feature.local.aggregate.BagOfVisualWords;
import org.openimaj.image.feature.local.aggregate.BlockSpatialAggregator;
import org.openimaj.io.IOUtils;
import org.openimaj.ml.annotation.ScoredAnnotation;
import org.openimaj.ml.annotation.linear.LiblinearAnnotator;
import org.openimaj.ml.clustering.ByteCentroidsResult;
import org.openimaj.ml.clustering.assignment.HardAssigner;
import org.openimaj.ml.clustering.kmeans.ByteKMeans;
import org.openimaj.util.pair.IntFloatPair;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;

/**
 * Created by yschondorf on 5/29/2018.
 */
public class Chapter12Generic {
    private static String IMAGES_PATH = "C:\\Development\\Video Analytics\\tpImages";

    public static void main(String[] args) {

        try {
            LiblinearAnnotator<FImage, String> trainer = null;
            VFSGroupDataset<FImage> allData = new VFSGroupDataset<FImage>(
                    IMAGES_PATH,
                    ImageUtilities.FIMAGE_READER);

            GroupedDataset<String, ListDataset<FImage>, FImage> data =
                    GroupSampler.sample(allData, 1, false);

            GroupedRandomSplitter<String, FImage> splits =
                    new GroupedRandomSplitter<String, FImage>(data, 15, 0, 15); // 15 training, 15 testing


            DenseSIFT denseSIFT = new DenseSIFT(5, 7);
            PyramidDenseSIFT<FImage> pyramidDenseSIFT = new PyramidDenseSIFT<FImage>(denseSIFT, 6f, 7);

            GroupedDataset<String, ListDataset<FImage>, FImage> sample =
                    GroupedUniformRandomisedSampler.sample(splits.getTrainingDataset(), 15);

            HardAssigner<byte[], float[], IntFloatPair> assigner = trainQuantiser(sample, pyramidDenseSIFT);

            FeatureExtractor<DoubleFV, FImage> extractor = new PHOWExtractor(pyramidDenseSIFT, assigner);

            //
            // Now we’re ready to construct and train a classifier
            //
            trainer = new LiblinearAnnotator<FImage, String>(
                    extractor, LiblinearAnnotator.Mode.MULTICLASS, SolverType.L2R_L2LOSS_SVC, 1.0, 0.00001);

            Date start = new Date();
            System.out.println("Classifier training: start");
            trainer.train(splits.getTrainingDataset());
            System.out.println("Classifier training: end");
            Date end = new Date();
            long durationSec = (end.getTime() - start.getTime()) / 1000;
            System.out.println("Classifier training duration: " + durationSec + " seconds");

            final GroupedDataset<String, ListDataset<FImage>, FImage> testDataSet = splits.getTestDataset();

            ClassificationEvaluator<CMResult<String>, String, FImage> eval =
                    new ClassificationEvaluator<CMResult<String>, String, FImage>(
                            trainer, testDataSet, new CMAnalyser<FImage, String>(CMAnalyser.Strategy.SINGLE));

            start = new Date();
            System.out.println("Classifier evaluation: start");
            Map<FImage, ClassificationResult<String>> guesses = eval.evaluate();
            System.out.println("Classifier evaluation - tp: end");
            end = new Date();
            durationSec = (end.getTime() - start.getTime()) / 1000;
            System.out.println("Classifier evaluation duration: " + durationSec + " seconds");

            CMResult<String> result = eval.analyse(guesses);
            System.out.println("Result - tp: " + result);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * This method extracts the first 10000 dense SIFT features from the images in the given dataset, and then
     * clusters them into 300 separate classes. It returns a HardAssigner that can be used to assign SIFT features
     * to identifiers.
     *
     * @param sample           the images to build the vocabulary from
     * @param pyramidDenseSIFT the dense SIFT feature engine
     * @return a HardAssigner mapping SIFT features to visual-word identifiers
     */
    static HardAssigner<byte[], float[], IntFloatPair> trainQuantiser(
            Dataset<FImage> sample,
            PyramidDenseSIFT<FImage> pyramidDenseSIFT)
    {
        System.out.println("trainQuantiser: start");
        Date start = new Date();
        List<LocalFeatureList<ByteDSIFTKeypoint>> allKeys = new ArrayList<LocalFeatureList<ByteDSIFTKeypoint>>();

        int i = 0;

        int total = sample.numInstances();
        for (FImage rec : sample) {
            i++;
            System.out.println(String.format("Analysing image %d out of %d", i, total));
            FImage img = rec.getImage();

            pyramidDenseSIFT.analyseImage(img);
            allKeys.add(pyramidDenseSIFT.getByteKeypoints(0.005f));
        }
        final int numberOfDenseSiftFeaturesToExtract = 10000;
        final int numberOfClassesInCluster = 300;
        if (allKeys.size() > numberOfDenseSiftFeaturesToExtract)
            allKeys = allKeys.subList(0, numberOfDenseSiftFeaturesToExtract);

        ByteKMeans km = ByteKMeans.createKDTreeEnsemble(numberOfClassesInCluster);
        DataSource<byte[]> dataSource = new LocalFeatureListDataSource<ByteDSIFTKeypoint, byte[]>(allKeys);
        System.out.println(String.format(
                "Clustering %d image features into %d classes...",
                numberOfDenseSiftFeaturesToExtract, numberOfClassesInCluster));
        ByteCentroidsResult result = km.cluster(dataSource);
        Date end = new Date();
        System.out.println("trainQuantiser: end");
        System.out.println("trainQuantiser duration: " + (end.getTime() - start.getTime())/1000 + " seconds");
        return result.defaultHardAssigner();
    }

    static class PHOWExtractor implements FeatureExtractor<DoubleFV, FImage> {
        PyramidDenseSIFT<FImage> pdsift;
        HardAssigner<byte[], float[], IntFloatPair> assigner;

        public PHOWExtractor(PyramidDenseSIFT<FImage> pdsift, HardAssigner<byte[], float[], IntFloatPair> assigner)
        {
            this.pdsift = pdsift;
            this.assigner = assigner;
        }

        public DoubleFV extractFeature(FImage object) {
            FImage image = object.getImage();
            pdsift.analyseImage(image);

            BagOfVisualWords<byte[]> bovw = new BagOfVisualWords<byte[]>(assigner);

            BlockSpatialAggregator<byte[], SparseIntFV> spatial = new BlockSpatialAggregator<byte[], SparseIntFV>(
                    bovw, 2, 2);

            return spatial.aggregate(pdsift.getByteKeypoints(0.015f), image.getBounds()).normaliseFV();
        }
    }
}

The code runs and produces the following output:

trainQuantiser: start
Analysing image 1 out of 15
Analysing image 2 out of 15
Analysing image 3 out of 15
Analysing image 4 out of 15
Analysing image 5 out of 15
Analysing image 6 out of 15
Analysing image 7 out of 15
Analysing image 8 out of 15
Analysing image 9 out of 15
Analysing image 10 out of 15
Analysing image 11 out of 15
Analysing image 12 out of 15
Analysing image 13 out of 15
Analysing image 14 out of 15
Analysing image 15 out of 15
Clustering 10000 image features into 300 classes...
trainQuantiser: end
trainQuantiser duration: 243 seconds
Classifier training: start
iter  1 act 6.283e-01 pre 6.283e-01 delta 1.096e+00 f 1.500e+01 |g| 1.146e+00 CG   1
iter  2 act 2.779e-05 pre 2.779e-05 delta 1.096e+00 f 1.437e+01 |g| 7.555e-03 CG   1
iter  3 act 2.175e-09 pre 2.175e-09 delta 1.096e+00 f 1.437e+01 |g| 6.702e-05 CG   1
iter  4 act 6.626e-13 pre 6.598e-13 delta 1.096e+00 f 1.437e+01 |g| 1.164e-06 CG   1
Classifier training: end
Classifier training duration: 28 seconds
Classifier evaluation: start
Classifier evaluation - tp: end
Classifier evaluation duration: 57 seconds
Result - tp:   Accuracy: 1.000
Error Rate: 0.000

I am not sure how to proceed from here. What I really want is not to evaluate the classifier's accuracy, as chapter 12 does, but to use the classifier to determine whether a new image contains my shape of interest. I have not found documentation or examples showing how to do this. Any help is much appreciated.

Apart from the tutorial I have not found any significant documentation. Can anyone point me to some? In the meantime I am just guessing. I cannot use testDataset, because training the classifier and using it need to be kept separate. So I want to train the classifier once (it takes a long time, many minutes) and save the result (say, by serializing the trainer object above to disk and deserializing it in later runs). When I add code to do this and try to use a testDataset of new images, I get a NullPointerException. The exception is unrelated to the deserialized object, because I also get it when the object is not yet on disk. The new code:

package com.mycompany.video.analytics;

import de.bwaldvogel.liblinear.SolverType;
import org.apache.commons.vfs2.FileSystemException;
import org.openimaj.data.DataSource;
import org.openimaj.data.dataset.Dataset;
import org.openimaj.data.dataset.GroupedDataset;
import org.openimaj.data.dataset.ListDataset;
import org.openimaj.data.dataset.VFSGroupDataset;
import org.openimaj.experiment.dataset.sampling.GroupSampler;
import org.openimaj.experiment.dataset.sampling.GroupedUniformRandomisedSampler;
import org.openimaj.experiment.dataset.split.GroupedRandomSplitter;
import org.openimaj.experiment.evaluation.classification.ClassificationEvaluator;
import org.openimaj.experiment.evaluation.classification.ClassificationResult;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser;
import org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMResult;
import org.openimaj.feature.DoubleFV;
import org.openimaj.feature.FeatureExtractor;
import org.openimaj.feature.SparseIntFV;
import org.openimaj.feature.local.data.LocalFeatureListDataSource;
import org.openimaj.feature.local.list.LocalFeatureList;
import org.openimaj.image.FImage;
import org.openimaj.image.ImageUtilities;
import org.openimaj.image.feature.dense.gradient.dsift.ByteDSIFTKeypoint;
import org.openimaj.image.feature.dense.gradient.dsift.DenseSIFT;
import org.openimaj.image.feature.dense.gradient.dsift.PyramidDenseSIFT;
import org.openimaj.image.feature.local.aggregate.BagOfVisualWords;
import org.openimaj.image.feature.local.aggregate.BlockSpatialAggregator;
import org.openimaj.io.IOUtils;
import org.openimaj.ml.annotation.ScoredAnnotation;
import org.openimaj.ml.annotation.linear.LiblinearAnnotator;
import org.openimaj.ml.clustering.ByteCentroidsResult;
import org.openimaj.ml.clustering.assignment.HardAssigner;
import org.openimaj.ml.clustering.kmeans.ByteKMeans;
import org.openimaj.util.pair.IntFloatPair;

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.Map;


public class Chapter12Generic {
    private static String IMAGES_PATH = "C:\\Development\\Video Analytics\\tpImages";
    private static String TEST_IMAGES_PATH = "C:\\Development\\Video Analytics\\testImages";
    private static String TRAINER_DATA_FILE_PATH = "C:\\Development\\Video Analytics\\out\\trainer.dat";

    public static void main(String[] args) throws Exception {

        LiblinearAnnotator<FImage, String> trainer = null;
        File inputDataFile = new File(TRAINER_DATA_FILE_PATH);
        if (inputDataFile.isFile()) {
            trainer = IOUtils.readFromFile(inputDataFile);
        } else {
            VFSGroupDataset<FImage> allData = new VFSGroupDataset<FImage>(
                    IMAGES_PATH,
                    ImageUtilities.FIMAGE_READER);

            GroupedDataset<String, ListDataset<FImage>, FImage> data =
                    GroupSampler.sample(allData, 1, false);

            GroupedRandomSplitter<String, FImage> splits =
                    new GroupedRandomSplitter<String, FImage>(data, 15, 0, 15); // 15 training, 15 testing


            DenseSIFT denseSIFT = new DenseSIFT(5, 7);
            PyramidDenseSIFT<FImage> pyramidDenseSIFT = new PyramidDenseSIFT<FImage>(denseSIFT, 6f, 7);

            GroupedDataset<String, ListDataset<FImage>, FImage> sample =
                    GroupedUniformRandomisedSampler.sample(splits.getTrainingDataset(), 15);

            HardAssigner<byte[], float[], IntFloatPair> assigner = trainQuantiser(sample, pyramidDenseSIFT);

            FeatureExtractor<DoubleFV, FImage> extractor = new PHOWExtractor(pyramidDenseSIFT, assigner);

            //
            // Now we’re ready to construct and train a classifier
            //
            trainer = new LiblinearAnnotator<FImage, String>(
                    extractor, LiblinearAnnotator.Mode.MULTICLASS, SolverType.L2R_L2LOSS_SVC, 1.0, 0.00001);

            Date start = new Date();
            System.out.println("Classifier training: start");
            trainer.train(splits.getTrainingDataset());
            IOUtils.writeToFile(trainer, inputDataFile);

            System.out.println("Classifier training: end");
            Date end = new Date();
            long durationSec = (end.getTime() - start.getTime()) / 1000;
            System.out.println("Classifier training duration: " + durationSec + " seconds");
        }

//        final GroupedDataset<String, ListDataset<FImage>, FImage> testDataSet = splits.getTestDataset();
        VFSGroupDataset<FImage> testDataSet = new VFSGroupDataset<FImage>(
                TEST_IMAGES_PATH,
                ImageUtilities.FIMAGE_READER);

        ClassificationEvaluator<CMResult<String>, String, FImage> eval =
                new ClassificationEvaluator<CMResult<String>, String, FImage>(
                        trainer, testDataSet, new CMAnalyser<FImage, String>(CMAnalyser.Strategy.SINGLE));

        Date start = new Date();
        System.out.println("Classifier evaluation: start");
        Map<FImage, ClassificationResult<String>> guesses = eval.evaluate();
        System.out.println("Classifier evaluation - tp: end");
        Date end = new Date();
        long durationSec = (end.getTime() - start.getTime()) / 1000;
        System.out.println("Classifier evaluation duration: " + durationSec + " seconds");

        CMResult<String> result = eval.analyse(guesses);
        System.out.println("Result - tp: " + result);      
    }

    /**
     * This method extracts the first 10000 dense SIFT features from the images in the given dataset, and then
     * clusters them into 300 separate classes. It returns a HardAssigner that can be used to assign SIFT features
     * to identifiers.
     *
     * @param sample           the images to build the vocabulary from
     * @param pyramidDenseSIFT the dense SIFT feature engine
     * @return a HardAssigner mapping SIFT features to visual-word identifiers
     */
    static HardAssigner<byte[], float[], IntFloatPair> trainQuantiser(
            Dataset<FImage> sample,
            PyramidDenseSIFT<FImage> pyramidDenseSIFT)
    {
        System.out.println("trainQuantiser: start");
        Date start = new Date();
        List<LocalFeatureList<ByteDSIFTKeypoint>> allKeys = new ArrayList<LocalFeatureList<ByteDSIFTKeypoint>>();

        int i = 0;

        int total = sample.numInstances();
        for (FImage rec : sample) {
            i++;
            System.out.println(String.format("Analysing image %d out of %d", i, total));
            FImage img = rec.getImage();

            pyramidDenseSIFT.analyseImage(img);
            allKeys.add(pyramidDenseSIFT.getByteKeypoints(0.005f));
        }
        final int numberOfDenseSiftFeaturesToExtract = 10000;
        final int numberOfClassesInCluster = 300;
        if (allKeys.size() > numberOfDenseSiftFeaturesToExtract)
            allKeys = allKeys.subList(0, numberOfDenseSiftFeaturesToExtract);

        ByteKMeans km = ByteKMeans.createKDTreeEnsemble(numberOfClassesInCluster);
        DataSource<byte[]> dataSource = new LocalFeatureListDataSource<ByteDSIFTKeypoint, byte[]>(allKeys);
        System.out.println(String.format(
                "Clustering %d image features into %d classes...",
                numberOfDenseSiftFeaturesToExtract, numberOfClassesInCluster));
        ByteCentroidsResult result = km.cluster(dataSource);
        Date end = new Date();
        System.out.println("trainQuantiser: end");
        System.out.println("trainQuantiser duration: " + (end.getTime() - start.getTime())/1000 + " seconds");
        return result.defaultHardAssigner();
    }

    static class PHOWExtractor implements FeatureExtractor<DoubleFV, FImage> {
        PyramidDenseSIFT<FImage> pdsift;
        HardAssigner<byte[], float[], IntFloatPair> assigner;

        public PHOWExtractor(PyramidDenseSIFT<FImage> pdsift, HardAssigner<byte[], float[], IntFloatPair> assigner)
        {
            this.pdsift = pdsift;
            this.assigner = assigner;
        }

        public DoubleFV extractFeature(FImage object) {
            FImage image = object.getImage();
            pdsift.analyseImage(image);

            BagOfVisualWords<byte[]> bovw = new BagOfVisualWords<byte[]>(assigner);

            BlockSpatialAggregator<byte[], SparseIntFV> spatial = new BlockSpatialAggregator<byte[], SparseIntFV>(
                    bovw, 2, 2);

            return spatial.aggregate(pdsift.getByteKeypoints(0.015f), image.getBounds()).normaliseFV();
        }
    }
}

The exception:

Exception in thread "main" java.lang.reflect.InvocationTargetException
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:497)
    at com.intellij.rt.execution.CommandLineWrapper.main(CommandLineWrapper.java:130)
Caused by: java.lang.NullPointerException
    at org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser$Strategy.add(CMAnalyser.java:80)
    at org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser.analyse(CMAnalyser.java:172)
    at org.openimaj.experiment.evaluation.classification.analysers.confusionmatrix.CMAnalyser.analyse(CMAnalyser.java:57)
    at org.openimaj.experiment.evaluation.classification.ClassificationEvaluator.analyse(ClassificationEvaluator.java:190)
    at com.mycompany.video.analytics.Chapter12Generic.main(Chapter12Generic.java:113)

The exception occurs on the call to:

CMResult<String> result = eval.analyse(guesses);

Is there any way to fix this?

Version 3 of the code, following @jon's answer. The problem now is that it classifies a false image as the real one.

public class Chapter12Generic_v3 {

    // contains an accordion folder with images from caltech101
    private static String TRAINING_IMAGES_PATH = "C:\\Development\\Video Analytics\\images";

    // contains 1 airplane image from caltech101
    private static String TEST_IMAGE = "C:\\Development\\Video Analytics\\testImages\\falseImages\\image_0001.jpg";

    private static String TRAINER_DATA_FILE_PATH = "C:\\Development\\Video Analytics\\out\\trainer.dat";

    public static void main(String[] args) throws Exception {
        LiblinearAnnotator<FImage, String> trainer = null;
        File inputDataFile = new File(TRAINER_DATA_FILE_PATH);
        if (inputDataFile.isFile()) {
            trainer = IOUtils.readFromFile(inputDataFile);
        } else {
            VFSGroupDataset<FImage> allData = new VFSGroupDataset<FImage>(
                    TRAINING_IMAGES_PATH,
                    ImageUtilities.FIMAGE_READER);

            GroupedDataset<String, ListDataset<FImage>, FImage> data =
                    GroupSampler.sample(allData, 1, false);

            GroupedRandomSplitter<String, FImage> splits =
                    new GroupedRandomSplitter<String, FImage>(data, 15, 0, 15); // 15 training, 15 testing


            DenseSIFT denseSIFT = new DenseSIFT(5, 7);
            PyramidDenseSIFT<FImage> pyramidDenseSIFT = new PyramidDenseSIFT<FImage>(denseSIFT, 6f, 7);

            GroupedDataset<String, ListDataset<FImage>, FImage> sample =
                    GroupedUniformRandomisedSampler.sample(splits.getTrainingDataset(), 15);

            HardAssigner<byte[], float[], IntFloatPair> assigner = trainQuantiser(sample, pyramidDenseSIFT);

            FeatureExtractor<DoubleFV, FImage> extractor = new PHOWExtractor(pyramidDenseSIFT, assigner);

            //
            // Now we’re ready to construct and train a classifier
            //
            trainer = new LiblinearAnnotator<FImage, String>(
                    extractor, LiblinearAnnotator.Mode.MULTICLASS, SolverType.L2R_L2LOSS_SVC, 1.0, 0.00001);

            Date start = new Date();
            System.out.println("Classifier training: start");
            trainer.train(splits.getTrainingDataset());
            IOUtils.writeToFile(trainer, new File(TRAINER_DATA_FILE_PATH));
            System.out.println("Classifier training: end");
            Date end = new Date();
            long durationSec = (end.getTime() - start.getTime()) / 1000;
            System.out.println("Classifier training duration: " + durationSec + " seconds");
        }


        FImage query = ImageUtilities.readF(new File(TEST_IMAGE));

        final List<ScoredAnnotation<String>> scoredAnnotations = trainer.annotate(query);
        final ClassificationResult<String> classificationResult = trainer.classify(query);
        System.out.println("scoredAnnotations: " + scoredAnnotations);
        System.out.println("classificationResult: " + classificationResult);
    }

    /**
     * This method extracts the first 10000 dense SIFT features from the images in the given dataset, and then
     * clusters them into 300 separate classes. It returns a HardAssigner that can be used to assign SIFT features
     * to identifiers.
     *
     * @param sample           the images to build the vocabulary from
     * @param pyramidDenseSIFT the dense SIFT feature engine
     * @return a HardAssigner mapping SIFT features to visual-word identifiers
     */
    static HardAssigner<byte[], float[], IntFloatPair> trainQuantiser(
            Dataset<FImage> sample,
            PyramidDenseSIFT<FImage> pyramidDenseSIFT)
    {
        System.out.println("trainQuantiser: start");
        Date start = new Date();
        List<LocalFeatureList<ByteDSIFTKeypoint>> allKeys = new ArrayList<LocalFeatureList<ByteDSIFTKeypoint>>();

        int i = 0;

        int total = sample.numInstances();
        for (FImage rec : sample) {
            i++;
            System.out.println(String.format("Analysing image %d out of %d", i, total));
            FImage img = rec.getImage();

            pyramidDenseSIFT.analyseImage(img);
            allKeys.add(pyramidDenseSIFT.getByteKeypoints(0.005f));
        }
        final int numberOfDenseSiftFeaturesToExtract = 10000;
        final int numberOfClassesInCluster = 300;
        if (allKeys.size() > numberOfDenseSiftFeaturesToExtract)
            allKeys = allKeys.subList(0, numberOfDenseSiftFeaturesToExtract);

        ByteKMeans km = ByteKMeans.createKDTreeEnsemble(numberOfClassesInCluster);
        DataSource<byte[]> dataSource = new LocalFeatureListDataSource<ByteDSIFTKeypoint, byte[]>(allKeys);
        System.out.println(String.format(
                "Clustering %d image features into %d classes...",
                numberOfDenseSiftFeaturesToExtract, numberOfClassesInCluster));
        ByteCentroidsResult result = km.cluster(dataSource);
        Date end = new Date();
        System.out.println("trainQuantiser: end");
        System.out.println("trainQuantiser duration: " + (end.getTime() - start.getTime())/1000 + " seconds");
        return result.defaultHardAssigner();
    }

    static class PHOWExtractor implements FeatureExtractor<DoubleFV, FImage> {
        PyramidDenseSIFT<FImage> pdsift;
        HardAssigner<byte[], float[], IntFloatPair> assigner;

        public PHOWExtractor(PyramidDenseSIFT<FImage> pdsift, HardAssigner<byte[], float[], IntFloatPair> assigner)
        {
            this.pdsift = pdsift;
            this.assigner = assigner;
        }

        public DoubleFV extractFeature(FImage object) {
            FImage image = object.getImage();
            pdsift.analyseImage(image);

            BagOfVisualWords<byte[]> bovw = new BagOfVisualWords<byte[]>(assigner);

            BlockSpatialAggregator<byte[], SparseIntFV> spatial = new BlockSpatialAggregator<byte[], SparseIntFV>(
                    bovw, 2, 2);

            return spatial.aggregate(pdsift.getByteKeypoints(0.015f), image.getBounds()).normaliseFV();
        }
    }
}

If you just want to classify things with your trained model, then ignore all the ClassificationEvaluator stuff; it exists only to compute accuracy and the like.

Take a look at http://openimaj.org/apidocs/org/openimaj/ml/annotation/linear/LiblinearAnnotator.html (the type of your trainer object). Because your trainer instance is typed on FImage and String, its annotate() and classify() methods will both accept an FImage you supply as input and produce classification results as output (in slightly different forms; you will have to decide which best fits your needs).
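
For example, here is a minimal sketch of that usage (my own illustration, not from the tutorial), assuming the annotator was saved with IOUtils.writeToFile as in the code above; the trainer.dat and query.jpg paths are placeholders:

import org.openimaj.experiment.evaluation.classification.ClassificationResult;
import org.openimaj.image.FImage;
import org.openimaj.image.ImageUtilities;
import org.openimaj.io.IOUtils;
import org.openimaj.ml.annotation.ScoredAnnotation;
import org.openimaj.ml.annotation.linear.LiblinearAnnotator;

import java.io.File;
import java.util.List;

public class ClassifyOneImage {
    public static void main(String[] args) throws Exception {
        // Deserialize the previously trained annotator (placeholder path).
        LiblinearAnnotator<FImage, String> trainer =
                IOUtils.readFromFile(new File("trainer.dat"));

        // Load the new image to classify (placeholder path).
        FImage query = ImageUtilities.readF(new File("query.jpg"));

        // annotate() returns a list of ScoredAnnotation<String>:
        // predicted class names, each with a confidence score.
        List<ScoredAnnotation<String>> annotations = trainer.annotate(query);
        System.out.println("annotations: " + annotations);

        // classify() returns a ClassificationResult<String>, which can be
        // queried per class for its confidence.
        ClassificationResult<String> result = trainer.classify(query);
        for (String clazz : result.getPredictedClasses()) {
            System.out.println(clazz + " -> confidence " + result.getConfidence(clazz));
        }
    }
}

Note that an annotator trained this way only scores the classes it saw during training, so to decide that an image does not contain the shape of interest you will likely need either a negative class in the training data or a threshold on the reported confidence.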