How to re-establish connection after communication link failure with pyodbc?

Every once in a while (maybe every few hours) my program catches this error:

'08S01', '[08S01] [Microsoft][ODBC SQL Server Driver]Communication link failure

I just want to be able to catch this error and then reconnect. How would I go about making a listener for it?

Here is my code...

#!C:/Python/python.exe -u

import pyodbc, requests, re, time, random, sys,pickle,smtplib,os
from datetime import datetime
from multiprocessing import Lock, Process, Queue, current_process
from azure.storage.blob import BlobService


auth = requests.auth.HTTPProxyAuth('user', 'pwd')
proxies = {'http': 'proxies'}
user_agent = {'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36'}
blob_service = BlobService(account_name='user', account_key='pwd')

update = ("""UPDATE DocumentList
            SET Downloaded=?, DownLoadedAs=?,DownLoadedWhen=?,DownLoadedSizeKB=?
            WHERE DocNumberSequence=?""")


def Downloader(linkQueue,fileQueue,uniqueIDQueue):

    for url in iter(linkQueue.get, 'STOP'):
        name = current_process().name
        link = url
        fileName = fileQueue.get();
        uniqueID=uniqueIDQueue.get();
        cnxn2 = pyodbc.connect('DRIVER={SQL Server};SERVER=.windows.net;DATABASE=db;UID=user;PWD=pwd', autocommit=True)
        cursor2 = cnxn2.cursor()

        #if 'LAS' or '.db' in str(fileName):
            #continue

        print 'BEFORE REQUEST'

        try:
            r = requests.get(link, proxies=proxies,headers=user_agent,auth=auth,allow_redirects=False)
        except requests.exceptions.RequestException as e:    # This is the correct syntax
            print e

        content = {}; content = r.headers          

        if 'location' in content:
            link = content['location'];
            if 'content-type' in content:
                extType= content['content-type'];
                ext = re.search(r'/(\w+)',extType).group(1);
            if 'content-length' in content:
                size = float(content['content-length'])*.001
            if ext=='pdf':
                with open(fileName+'.'+ext, "wb") as datum:
                    datum.write(r.content)
                datum.close()
            elif ext=='html':
                print 'YOU ARE GETTING REDIRECT PAGE!!!!!'
                break
            elif ext=='vnd':
                ext = 'xlsx'
                with open(fileName+'.'+ext, "wb") as datum:
                    datum.write(r.content)
                datum.close()
            elif ext=='msword':
                ext = 'doc'
                with open(fileName+'.'+ext, "wb") as datum:
                    datum.write(r.content)
                datum.close()
            else:
                with open(fileName+'.'+ext, "wb") as datum:
                    datum.write(r.content)
                datum.close()

        else:
            if 'content-type' in content:
                extType= content['content-type'];
                ext = re.search(r'/(\w+)',extType).group(1);
            if 'content-length' in content:
                size = float(content['content-length'])*.001
            if ext=='pdf':
                with open(fileName+'.'+ext, "wb") as datum:
                    datum.write(r.content)
                datum.close()
            elif ext=='html':     
                server = smtplib.SMTP('smtp.gmail.com', 587)
                server.starttls()
                server.login("user", "pwd")
                msg = "Get back to headquaters!"
                server.sendmail("email", "email", msg)
                server.quit()
                print 'YOU ARE GETTING REDIRECT PAGE!!!!!'
                continue
            elif ext=='vnd':
                ext = 'xlsx'
                with open(fileName+'.'+ext, "wb") as datum:
                    datum.write(r.content)
                datum.close()
            elif ext=='msword':
                ext = 'doc'
                with open(fileName+'.'+ext, "wb") as datum:
                    datum.write(r.content)
                datum.close()
            else:
                with open(fileName+'.'+ext, "wb") as datum:
                    datum.write(r.content)
                datum.close()   
        print 'here'
        dt = datetime.now()
        meta=[];meta1=[];meta1=[fileName,ext,link,dt,size]
        meta.append(meta1)
        #num = random.uniform(0,) 
        #time.sleep(num)
        while True:
            try:
                blob_service.put_block_blob_from_path(
                'container',
                fileName+'.'+ext,
                fileName+'.'+ext
                )
                break
            except:
                print sys.exc_info()[1]

        while True:
            try:
                updated = cursor2.execute(update,'Yes', fileName+'.'+ext, dt, size,uniqueID )
                break
            except:
                print sys.exc_info()[1]
        cnxn2.close()


        print fileName+'.'+ext
        print 'done'+(current_process().name)
        output = open(fileName+'.pkl', 'wb')
        pickle.dump(meta, output)
        output.close()
        try:
            os.remove(fileName+'.'+ext)
        except OSError:
            pass



if __name__ == '__main__':

    numFiles = int(sys.argv[2])
    cnxn = pyodbc.connect('DRIVER={SQL Server};SERVER=windows.net;DATABASE=;UID=;PWD=', autocommit=True);
    cursor1 = cnxn.cursor()

    cursor1.execute("""SELECT DocumentLink,DownLoadedAs,API,DocNumberSequence,StateAbbr
                      FROM  DocumentList
                      WHERE StateAbbr='CA' AND DocNumberSequence>'1253750'""")

    rows = cursor1.fetchmany(numFiles)
    linkQueue=Queue();fileQueue=Queue();uniqueIDQueue=Queue();processes =[];

    for row in rows:
            url = str(row.DocumentLink)
            linkQueue.put(url)
            uniqueID = str(row.DocNumberSequence)
            uniqueIDQueue.put(uniqueID)
            #tracking = str(row.API)
            #docType = str(row.DocumentClass)
            abbr = str(row.StateAbbr)
            fileName = row.DownLoadedAs
            fileName = fileName.split('.')[0]
            fileName = abbr+'_'+fileName+'_'+uniqueID; fileName=fileName.replace(' ','');
            fileQueue.put(fileName)

    cnxn.close()
    print "BEFORE WORKERS"
    workers = int(sys.argv[1]);
    for x in xrange(workers):
            p = Process(target=Downloader, args=(linkQueue, fileQueue, uniqueIDQueue))
            p.start()
            processes.append(p)
            linkQueue.put('STOP')
            fileQueue.put('STOP')

    for p in processes:
        p.join()

I think the error is being caught, because the program keeps running, but it never reconnects until I interrupt the process. I have never handled an error like this before. Do I have to put the whole block of code inside a try around the connect, or can I add some kind of listener?

It looks like the error is being printed by the "print sys.exc_info()[1]" here:

    while True:
        try:
            updated = cursor2.execute(update,'Yes', fileName+'.'+ext, dt, size,uniqueID )
            break
        except:
            print sys.exc_info()[1]

This is a bit of a guess, but that is the only .execute() you have wrapped in a try / except block. To verify, you could remove the try / except and just run the .execute() statement you already have, so the full error reaches the console.
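
For example, temporarily replacing that loop with the bare call (same names as in your code) would let the full traceback, including the SQLSTATE, show up instead of being swallowed:

    # temporary: no try/except, so a link failure raises and prints a full traceback
    updated = cursor2.execute(update, 'Yes', fileName+'.'+ext, dt, size, uniqueID)
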

If that is the case, you could put the try / except back and attempt a reconnect inside the except? It is a workaround, but it builds in one level of failsafe against a network failure between the machine you are on and the database server (see the sketch below). Good luck!
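
Roughly, that could look like the helper below. This is only a sketch, not something I have run against your setup: it assumes the failure surfaces as a pyodbc.Error whose first args entry is the SQLSTATE (e.g. '08S01'), CONN_STR is a placeholder for your real connection string, and execute_with_reconnect is just a name I made up:

    import time
    import pyodbc

    CONN_STR = 'DRIVER={SQL Server};SERVER=.windows.net;DATABASE=db;UID=user;PWD=pwd'  # placeholder

    def execute_with_reconnect(cnxn, sql, params, retries=5, wait=5):
        # Try the statement; on a dropped-link error, rebuild the connection
        # and try again, up to `retries` times.
        for attempt in range(1, retries + 1):
            try:
                cursor = cnxn.cursor()
                cursor.execute(sql, params)
                return cnxn, cursor
            except pyodbc.Error as e:
                sqlstate = e.args[0] if e.args else ''
                print 'execute failed (%s), attempt %d of %d' % (sqlstate, attempt, retries)
                if sqlstate not in ('08S01', '08001'):
                    raise                      # not a connection problem, let it propagate
                try:
                    cnxn.close()               # discard the dead connection
                except pyodbc.Error:
                    pass
                time.sleep(wait)
                cnxn = pyodbc.connect(CONN_STR, autocommit=True)  # fresh connection
        raise RuntimeError('still failing after %d attempts' % retries)

Inside Downloader that would replace the second while True loop, something like:

    cnxn2, cursor2 = execute_with_reconnect(
        cnxn2, update, ('Yes', fileName+'.'+ext, dt, size, uniqueID))

The helper hands back the (possibly new) connection so the rest of the loop keeps using a live cnxn2; the same pattern could wrap the blob upload retry as well.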