Why doesn't the following code snippet run successfully?
I am reading the search-engine chapter of Programming Collective Intelligence, where I came across the following code. After implementing it I get an error. Please help.
import urllib2
from BeautifulSoup import *
from urlparse import urljoin

class crawler:
    def __init__(self,dbname):
        pass

    def __del__(self):
        pass

    def dbcommit(self):
        pass

    def getentryid(self,table,field,value,createnew=True):
        return None

    def addtoindex(self,url,soup):
        print 'Indexing %s' % url

    def gettextonly(self,soup):
        return None

    def seperatewords(self,text):
        return None

    def isindexed(self,url):
        return False

    def addlinkref(self,urlFrom,urlTo,linkText):
        pass

    def crawl(self,pages,depth=2):
        for i in range(depth):
            newpages=set()
            for page in pages:
                try:
                    c=urllib2.urlopen(page)
                except:
                    print 'Could not open %s'%page
                    continue
                soup=BeautifulSoup(c.read())
                self.addtoindex(page,soup)

                links=soup('a')
                for link in links:
                    if('href' in dict(link.attrs)):
                        url=urljoin(page,link['href'])
                        if url.find("'")!=-1: continue
                        url=url.split('#')[0]
                        if url[0:4]=='http' and not self.isindexed(url):
                            newpages.add(url)
                        linkText=self.gettextonly(link)
                        self.addlinkref(page,url,linkTest)
                self.dbcommit()
            pages=newpages

    def createindextables(self):
        pass
I get the following error:
>>cwlr.crawl(pagelist)
Indexing http://en.wikipedia.org/wiki/Artificial_neural_network
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-50-97778b0c0db8> in <module>()
----> 1 cwlr.crawl(pagelist)
C:\Users\Blue\Anaconda\searchengine.py in crawl(self, pages, depth)
47 url=urljoin(page,link['href'])
48 if url.find("'")!=-1: continue
---> 49 url=url.split('#')[0]
50 if url[0:4]=='http' and not self.isindexed(url):
51 newpages.add(url)
NameError: global name 'linkTest' is not defined
You misspelled linkText as linkTest:

linkText=self.gettextonly(link)
    ↑
self.addlinkref(page,url,linkTest)
                             ↑
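
With that one name fixed, the tail of the inner loop in crawl() looks like this (a minimal sketch; the stub methods from your listing are left unchanged, so gettextonly() still returns None and addlinkref() still does nothing, but the NameError goes away):

for link in links:
    if('href' in dict(link.attrs)):
        url=urljoin(page,link['href'])
        if url.find("'")!=-1: continue
        url=url.split('#')[0]                 # drop any #fragment from the URL
        if url[0:4]=='http' and not self.isindexed(url):
            newpages.add(url)
        linkText=self.gettextonly(link)       # defined as linkText ...
        self.addlinkref(page,url,linkText)    # ... and passed as linkText (was linkTest)

After reloading the module, cwlr.crawl(pagelist) should crawl to the default depth of 2 and only print the "Indexing ..." lines, since every other method in the class is still a stub.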