How to build a Doc2Vec model using an 'iterable' object

Because of the question I asked on this page, my code ran out of memory. So I wrote the second code to have an iterable alldocs instead of an all-in-memory alldocs; I changed my code based on the explanation on this page. I am not familiar with the streaming concept and I could not solve the error I got.
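For reference, the streaming idea is that gensim's Doc2Vec makes several passes over the corpus (a vocabulary scan plus the training passes), so it needs an object it can iterate over repeatedly rather than a one-shot generator. A minimal sketch of that difference (the sample documents below are made up for illustration):

from gensim.models.doc2vec import LabeledSentence

def gen_docs():
    # One-shot generator: exhausted after a single pass, so the training
    # passes that follow build_vocab would see no documents at all.
    for i, words in enumerate([['first', 'doc'], ['second', 'doc']]):
        yield LabeledSentence(words, [i])

class IterableDocs(object):
    # Restartable iterable: each call to __iter__ starts a fresh pass,
    # which is what Doc2Vec needs for its repeated scans of the corpus.
    def __iter__(self):
        for i, words in enumerate([['first', 'doc'], ['second', 'doc']]):
            yield LabeledSentence(words, [i])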

This code reads all files in all folders under the given path. The content of each file consists of a document name followed by its content, on two lines. For instance:

clueweb09-en0010-07-00000

dove gif clipart pigeon clip art picture image hiox free birds india web icons clipart add stumble upon

clueweb09-en0010-07-00001

google bookmarks yahoo bookmarks php script java script jsp script licensed scripts html tutorials css tutorials
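Just to illustrate that layout, a minimal parsing sketch (the helper name parse_clueweb_file is made up here) that turns one such file into (document name, token list) pairs could look like this:

def parse_clueweb_file(fname):
    # Yields (doc_name, tokens) pairs: a line starting with 'clueweb09-en00'
    # names the document, and the following line(s) hold its tokens.
    doc_name, tokens = None, []
    with open(fname) as f:
        for line in f:
            if line.startswith('clueweb09-en00'):
                if doc_name is not None:
                    yield doc_name, tokens
                doc_name, tokens = line.strip(), []
            else:
                tokens += line.split()
    if doc_name is not None:
        yield doc_name, tokens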

First code:

# coding: utf-8
import string
import nltk
import nltk.tokenize
from nltk.corpus import stopwords
import re
import os, sys

import MySQLRepository

from gensim import utils
from gensim.models.doc2vec import Doc2Vec
import gensim.models.doc2vec
from gensim.models.doc2vec import LabeledSentence
from boto.emr.emrobject import KeyValue


def readAllFiles(path):
    # Recursively walk the directory tree and process every file.
    dirs = os.listdir(path)
    for file in dirs:
        if os.path.isfile(path + "/" + file):
            prepareDoc2VecSetting(path + '/' + file)
        else:
            pf = path + "/" + file
            readAllFiles(pf)

def prepareDoc2VecSetting(fname):
    # Read one file, split it into (document name, tokens) records,
    # and append a LabeledSentence per document to the global alldocs list.
    mapDocName_Id = []
    keyValues = set()
    with open(fname) as alldata:
        a = alldata.readlines()
        end = len(a)
        label = 0
        tokens = []
        for i in range(0, end):
            if a[i].startswith('clueweb09-en00'):
                mapDocName_Id.insert(label, a[i])
                label = label + 1
                alldocs.append(LabeledSentence(tokens[:], [label]))
                keyValues |= set(tokens)
                tokens = []
            else:
                tokens = tokens + a[i].split()

    mydb.insertkeyValueData(keyValues)
    mydb.insertDocId(mapDocName_Id)


mydb = MySQLRepository.MySQLRepository()

alldocs = []
pth = '/home/flr/Desktop/newInput/tokens'
readAllFiles(pth)

model = Doc2Vec(alldocs, size=300, window=5, min_count=2, workers=4)
model.save(pth + '/my_model.doc2vec')

Second code (I have left out the DB-related parts):

import gensim
import os

from gensim.models.doc2vec import Doc2Vec
import gensim.models.doc2vec
from gensim.models.doc2vec import LabeledSentence


class prepareAllDocs(object):

    def __init__(self, top_dir):
        self.top_dir = top_dir

    def __iter__(self):
        mapDocName_Id = []
        label = 1
        for root, dirs, files in os.walk(self.top_dir):
            for fname in files:
                print fname
                tokens = []
                with open(os.path.join(root, fname)) as f:
                    for i, line in enumerate(f):
                        if line.startswith('clueweb09-en00'):
                            mapDocName_Id.append(line)
                            if tokens:
                                yield LabeledSentence(tokens[:], [label])
                                label += 1
                                tokens = []
                        else:
                            tokens = tokens + line.split()
                    yield LabeledSentence(tokens[:], [label])

pth = '/home/flashkar/Desktop/newInput/tokens/'
allDocs = prepareAllDocs('/home/flashkar/Desktop/newInput/tokens/')
for doc in allDocs:
    model = Doc2Vec(allDocs, size=300, window=5, min_count=2, workers=4)
model.save(pth + '/my_model.doc2vec')

This is the error:

Traceback (most recent call last):
  File "/home/flashkar/git/doc2vec_annoy/Doc2Vec_Annoy/KNN/testiterator.py", line 44, in <module>
    model = Doc2Vec(allDocs, size = 300, window = 5, min_count = 2, workers = 4)
  File "/home/flashkar/anaconda/lib/python2.7/site-packages/gensim/models/doc2vec.py", line 618, in __init__
    self.build_vocab(documents, trim_rule=trim_rule)
  File "/home/flashkar/anaconda/lib/python2.7/site-packages/gensim/models/word2vec.py", line 523, in build_vocab
    self.scan_vocab(sentences, progress_per=progress_per, trim_rule=trim_rule)  # initial survey
  File "/home/flashkar/anaconda/lib/python2.7/site-packages/gensim/models/doc2vec.py", line 655, in scan_vocab
    for document_no, document in enumerate(documents):
  File "/home/flashkar/git/doc2vec_annoy/Doc2Vec_Annoy/KNN/testiterator.py", line 40, in __iter__
    yield LabeledSentence(tokens[:], tpl[1])
IndexError: list index out of range

You are using a generator function because you don't want to store all of your documents, yet you are still storing them all in alldocs. You can just yield LabeledSentence(tokens[:], tpl[1]).

What is currently happening is that you are appending to a list and returning the list. That is why you are getting the AttributeError. Additionally, on every iteration you append to the list, which means that on each iteration i you are returning document i along with all the documents that came before it!
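As a minimal sketch of that idea, assuming the same two-line file layout and the old-style LabeledSentence/size API from the question (the paths are copied from the question), the iterable can yield each document directly and the model is built once, outside any loop:

import os
from gensim.models.doc2vec import Doc2Vec, LabeledSentence

class PrepareAllDocs(object):
    # Restartable iterable: every call to __iter__ re-reads the files and
    # yields one LabeledSentence per document, so nothing accumulates in memory.
    def __init__(self, top_dir):
        self.top_dir = top_dir

    def __iter__(self):
        label = 0
        for root, dirs, files in os.walk(self.top_dir):
            for fname in files:
                tokens = []
                with open(os.path.join(root, fname)) as f:
                    for line in f:
                        if line.startswith('clueweb09-en00'):
                            # A new document name: emit the previous document first.
                            if tokens:
                                yield LabeledSentence(tokens, [label])
                                label += 1
                            tokens = []
                        else:
                            tokens += line.split()
                if tokens:
                    # Emit the last document of the file.
                    yield LabeledSentence(tokens, [label])
                    label += 1

pth = '/home/flashkar/Desktop/newInput/tokens/'
alldocs = PrepareAllDocs(pth)
# Build the model once; Doc2Vec iterates over alldocs itself
# (vocabulary scan plus training passes).
model = Doc2Vec(alldocs, size=300, window=5, min_count=2, workers=4)
model.save(os.path.join(pth, 'my_model.doc2vec'))

Because the object restarts its iteration on every __iter__ call, Doc2Vec can make all the passes it needs without the whole corpus ever being held in a list.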