Serializing/Compiling HMM 模型到 Java 中的文件

Serializing/Compiling HMM MODEL in to FILE in Java

我用自己的POS语料库成功评估了Lingpipe对POS Tagging的HMM实现(准确率超过90%)。

在我自己的 POS 语料库上评估 POS HMM 所用的 Ant 目标,与用于 Brown POS Corpus 的相同:

<target name="eval-brown"
        depends="compile">
  <java classname="EvaluatePos"
        fork="true"
        maxMemory="512M">
    <jvmarg value="-server"/>
    <classpath refid="classpath.standard"/>
    <arg value="1"/>                 <!-- args[0]: sentence eval rate -->
    <arg value="50000"/>            <!-- args[1]: training tokens required before eval starts -->
    <arg value="10"/>                <!-- args[2]: max n-best size -->
    <arg value="8"/>                 <!-- args[3]: character n-gram order -->
    <arg value="128"/>               <!-- args[4]: number of characters (LM alphabet size) -->
    <arg value="8.0"/>               <!-- args[5]: LM interpolation (lambda) factor -->
    <arg value="BrownPosCorpus"/>  <!-- args[6]: corpus implementation class, loaded reflectively -->
    <arg value="${data.pos.brown}"/>    <!-- args[7]: path to the corpus data file -->
    <arg value="true"/>            <!-- "smoothe tags" -- NOTE(review): never read by the EvaluatePos shown (it only uses args[0..7]); confirm whether still needed -->
  </java>
</target>

用于评估 HMM 词性标注器的类是 EvaluatePos,其代码(EvaluatePos.java)如下。

 public class EvaluatePos {

    // Command-line configuration, parsed once in the constructor.
    final int mSentEvalRate;     // evaluate every mSentEvalRate-th training sentence
    final int mToksBeforeEval;   // training tokens required before evaluation begins
    final int mMaxNBest;         // n-best list size for the n-best evaluator
    final int mNGram;            // character n-gram order of the emission LMs
    final int mNumChars;         // number of characters (LM alphabet size)
    final double mLambdaFactor;  // interpolation ratio for LM smoothing
    final PosCorpus mCorpus;     // corpus implementation, constructed reflectively

    // All distinct tags collected during the profiling pass.
    final Set<String> mTagSet = new HashSet<String>();
    HmmCharLmEstimator mEstimator;
    TaggerEvaluator<String> mTaggerEvaluator;
    NBestTaggerEvaluator<String> mNBestTaggerEvaluator;
    MarginalTaggerEvaluator<String> mMarginalTaggerEvaluator;

    int mTrainingSentenceCount = 0;
    int mTrainingTokenCount = 0;

    /**
     * Parses the positional command-line arguments and reflectively
     * constructs the corpus class named by {@code args[6]}, passing it the
     * data file named by {@code args[7]}.
     *
     * @param args positional arguments; see the Ant target for their meanings
     * @throws Exception if the corpus class cannot be loaded, instantiated,
     *         or if a numeric argument is malformed
     */
    public EvaluatePos(String[] args) throws Exception {
        // parseInt/parseDouble avoid the needless boxing of valueOf
        mSentEvalRate = Integer.parseInt(args[0]);
        mToksBeforeEval = Integer.parseInt(args[1]);
        mMaxNBest = Integer.parseInt(args[2]);
        mNGram = Integer.parseInt(args[3]);
        mNumChars = Integer.parseInt(args[4]);
        mLambdaFactor = Double.parseDouble(args[5]);
        String constructorName = args[6];
        File corpusFile = new File(args[7]);
        // NOTE(review): args[8] ("smoothe tags" in the Ant file) is never
        // read here -- confirm whether it is still needed.
        Object[] consArgs = new Object[] { corpusFile };
        @SuppressWarnings("rawtypes") // req 2 step
        PosCorpus corpus
            = (PosCorpus)
            Class
            .forName(constructorName)
            .getConstructor(new Class[] { File.class })
            .newInstance(consArgs);
        mCorpus = corpus;
    }

    /**
     * Runs the evaluation: pass 1 profiles the corpus (counts and tag set),
     * pass 2 trains the HMM estimator while periodically evaluating it on
     * held-out-then-trained sentences, then prints a final report.
     *
     * @throws IOException if reading the corpus fails
     */
    void run() throws IOException {
        System.out.println("\nCOMMAND PARAMETERS:");
        System.out.println("  Sent eval rate=" + mSentEvalRate);
        System.out.println("  Toks before eval=" + mToksBeforeEval);
        System.out.println("  Max n-best eval=" + mMaxNBest);
        System.out.println("  Max n-gram=" + mNGram);
        System.out.println("  Num chars=" + mNumChars);
        System.out.println("  Lambda factor=" + mLambdaFactor);

        // Pass 1: profile the corpus, filling mTagSet and the counters.
        CorpusProfileHandler profileHandler = new CorpusProfileHandler();
        parseCorpus(profileHandler);
        String[] tags = mTagSet.toArray(Strings.EMPTY_STRING_ARRAY);
        Arrays.sort(tags);
        // Copy constructor replaces the element-by-element loop; a HashSet
        // is unordered, so the contents are identical.
        Set<String> tagSet = new HashSet<String>(Arrays.asList(tags));

        System.out.println("\nCORPUS PROFILE:");
        System.out.println("  Corpus class=" + mCorpus.getClass().getName());
        System.out.println("  #Sentences="
                           + mTrainingSentenceCount);
        System.out.println("  #Tokens=" + mTrainingTokenCount);
        System.out.println("  #Tags=" + tags.length);
        System.out.println("  Tags=" + Arrays.asList(tags));

        System.out.println("\nEVALUATION:");
        mEstimator
            = new HmmCharLmEstimator(mNGram,mNumChars,mLambdaFactor);
        for (String tag : tags)
            mEstimator.addState(tag);

        HmmDecoder decoder
            = new HmmDecoder(mEstimator); // no caching
        boolean storeTokens = true;
        mTaggerEvaluator
            = new TaggerEvaluator<String>(decoder,storeTokens);
        mNBestTaggerEvaluator
            = new NBestTaggerEvaluator<String>(decoder,mMaxNBest,mMaxNBest);
        mMarginalTaggerEvaluator
            = new MarginalTaggerEvaluator<String>(decoder,tagSet,storeTokens);

        // Pass 2: evaluate-then-train over the same corpus.
        LearningCurveHandler evaluationHandler
            = new LearningCurveHandler();
        parseCorpus(evaluationHandler);

        System.out.println("\n\n\nFINAL REPORT");

        System.out.println("\n\nFirst Best Evaluation");
        System.out.println(mTaggerEvaluator.tokenEval());

        System.out.println("\n\nN Best Evaluation");
        System.out.println(mNBestTaggerEvaluator.nBestHistogram());
        // NOTE(review): the marginal evaluator is fed during pass 2 but
        // never summarized in the final report -- confirm this is intended.
    }

    /**
     * Streams every input source of the corpus through the given handler.
     *
     * @param handler callback invoked once per parsed tagging (sentence)
     * @throws IOException if a source cannot be read
     */
    void parseCorpus(ObjectHandler<Tagging<String>> handler) throws IOException {
        Parser<ObjectHandler<Tagging<String>>> parser = mCorpus.parser();
        parser.setHandler(handler);
        Iterator<InputSource> it = mCorpus.sourceIterator();
        while (it.hasNext()) {
            InputSource in = it.next();
            parser.parse(in);
        }
    }

    /** Pass-1 handler: tallies sentences and tokens, and collects the tag set. */
    class CorpusProfileHandler implements ObjectHandler<Tagging<String>> {
        public void handle(Tagging<String> tagging) {
            ++mTrainingSentenceCount;
            mTrainingTokenCount += tagging.size();
            for (int i = 0; i < tagging.size(); ++i)
                mTagSet.add(tagging.tag(i));
        }
    }

    /**
     * Pass-2 handler: for each sentence, first evaluates the current model
     * (once enough tokens have been seen, at the configured sentence rate)
     * and then trains on the sentence, producing a learning curve.
     */
    class LearningCurveHandler implements ObjectHandler<Tagging<String>> {
        // Tokens already used for training; anything else counts as unknown.
        Set<String> mKnownTokenSet = new HashSet<String>();
        int mUnknownTokensTotal = 0;
        int mUnknownTokensCorrect = 0;
        public void handle(Tagging<String> tagging) {
            if (mEstimator.numTrainingTokens() > mToksBeforeEval
                && mEstimator.numTrainingCases() % mSentEvalRate == 0) {

                mTaggerEvaluator.handle(tagging);
                mNBestTaggerEvaluator.handle(tagging);
                mMarginalTaggerEvaluator.handle(tagging);
                System.out.println("\nTest Case "
                                   + mTaggerEvaluator.numCases());
                System.out.println("First Best Last Case Report");
                System.out.println(mTaggerEvaluator.lastCaseToString(mKnownTokenSet));
                System.out.println("N-Best Last Case Report");
                System.out.println(mNBestTaggerEvaluator.lastCaseToString(5));
                System.out.println("Marginal Last Case Report");
                System.out.println(mMarginalTaggerEvaluator.lastCaseToString(5));
                System.out.println("Cumulative Evaluation");
                System.out.print("    Estimator:  #Train Cases="
                                 + mEstimator.numTrainingCases());
                System.out.println(" #Train Toks="
                                   + mEstimator.numTrainingTokens());
                ConfusionMatrix tokenEval = mTaggerEvaluator.tokenEval().confusionMatrix();
                System.out.println("    First Best Accuracy (All Tokens) = "
                                   + tokenEval.totalCorrect() 
                                   + "/" + tokenEval.totalCount()
                                   + " = " + tokenEval.totalAccuracy());
                ConfusionMatrix unkTokenEval = mTaggerEvaluator.unknownTokenEval(mKnownTokenSet).confusionMatrix();
                mUnknownTokensTotal += unkTokenEval.totalCount();
                mUnknownTokensCorrect += unkTokenEval.totalCorrect();
                System.out.println("    First Best Accuracy (Unknown Tokens) = "
                                   + mUnknownTokensCorrect
                                   + "/" + mUnknownTokensTotal
                                   + " = " + (mUnknownTokensCorrect/(double)mUnknownTokensTotal));
            }
            // train after eval
            mEstimator.handle(tagging);
            for (int i = 0; i < tagging.size(); ++i)
                mKnownTokenSet.add(tagging.token(i));
        }
    }

    /**
     * Entry point; see the Ant target for the expected positional arguments.
     *
     * @param args positional command-line arguments
     * @throws Exception on any configuration or I/O failure
     */
    public static void main(String[] args)
        throws Exception {

        new EvaluatePos(args).run();
    }
}

我现在的问题是如何创建 HMM 模型文件以将其用作基于 Chain CRF 的 NER 中的一个特征。

LingPipe 发行版 ../../models 文件夹中的 pos-en-general-brown.HiddenMarkovModel 模型文件是如何创建的?

我正在使用 BrownPosCorpus.java、BrownPosParser.java 和 EvaluatePos.java

创建pos hmm模型文件的以下代码应该放在哪里?

// write output to file
        File modelFile = new File(args[1]);
        AbstractExternalizable.compileTo(estimator,modelFile);

Ant 文件需要做哪些修改才能创建 pos hmm 模型文件?

我想将 POS HMM 模型文件用作 Chain CRF 特征提取器中的特征:

...
    static final File POS_HMM_FILE
        = new File("../../models/pos-en-general-brown.HiddenMarkovModel");
...

此致。

要保存经过训练的 HMM,只需像任何其他 Java 对象一样序列化经过训练的对象。

您需要为 HMM 编写特征提取器回调。

您可能希望将 HMM 和 CRF 打包到一个新的可序列化对象中。LingPipe 提供了 AbstractExternalizable 基类,可以使序列化更容易,并具有更好的向前(版本)兼容性。