Boto S3 response Error : Access Denied

I am trying to upload large files to my bucket using a parallel multipart upload. I took the code from here and decided to use it because it is simple and easy to follow. Running the program, however, gives me the following error:

boto.exception.S3ResponseError: S3ResponseError: 403 Forbidden
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>AccessDenied</Code><Message>Access Denied</Message><RequestId>BF24672A4459F15E</RequestId><HostId>SN94E8Sg3QeiNQdOoB0CNZmAKZkVSrae8ORBOcjN9mKl07LjYV8hHhNG5Ox2f2bC</HostId></Error>

I have looked at all the different solutions given here on Stack Overflow and found that none of them addresses my problem. I have full access to the bucket: I can read, write, and delete files in it without trouble, and the error shows up only when I run this code. s3cmd also works fine and reports no errors. Any help would be appreciated. The code and stack trace are pasted below.

Code:

import math
from multiprocessing.dummy import Pool #using dummy for debugging
import os

from boto.s3.connection import S3Connection
from filechunkio import FileChunkIO
from ConfigParser import RawConfigParser, NoOptionError

config = RawConfigParser()
config.read( 'tm/aws.cfg' )

#conn = S3Connection( config.get( 'prodAws', 'aws_access_key_id' ), config.get( 'prodAws', 'aws_secret_access_key' ) )
acs_key = config.get( 'prodAws', 'aws_access_key_id' )
sec_key = config.get( 'prodAws', 'aws_secret_access_key' )
try:
    default_bucket = config.get( 'prodAws', 'bucket' )
except NoOptionError as e:
    print( "Configuration error({0})".format( e.message ) )
    exit()

def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
                 keyname, offset, bytes, amount_of_retries = 5):
    """
    Uploads a part with retries.
    """
    def _upload(retries_left=amount_of_retries):
        try:
            print( 'Start uploading part #%d ...' % part_num )
            conn = S3Connection( aws_key, aws_secret )
            bucket = conn.get_bucket( bucketname, validate=False )
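            # find this upload by listing every in-progress multipart upload;
            # this ListMultipartUploads call is what fails (see the fix at the end)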
            for mp in bucket.get_all_multipart_uploads():
                if mp.id == multipart_id:
                    with FileChunkIO( keyname, 'r', offset=offset, bytes=bytes) as fp:
                        mp.upload_part_from_file( fp=fp, part_num=part_num )
                    break
        except Exception as e:
            print( e )
            if retries_left:
                _upload( retries_left = retries_left - 1 )
            else:
                print( 'Failed uploading part #%d' % part_num )
                raise e
        else:
            print( 'Uploaded part #%d' % part_num )

    _upload()


def upload(bucketname, aws_key, aws_secret, keyname, parallel_processes=5):
    """
    Parallel multipart upload.
    """
    conn = S3Connection( aws_key, aws_secret )
    bucket = conn.get_bucket( bucketname, validate=False )

    mp = bucket.initiate_multipart_upload( keyname )

    source_size = os.stat( keyname ).st_size
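    # scale the chunk size with the square root of the file size, but never let it
    # drop below 5242880 bytes (5 MB), the S3 minimum size for a non-final part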
    bytes_per_chunk = max( int( math.sqrt( 5242880 ) * math.sqrt( source_size ) ), 5242880 )
    chunk_amount = int( math.ceil( source_size / float( bytes_per_chunk ) ) )

    pool = Pool( processes=parallel_processes )
    for i in range( chunk_amount ):
        offset = i * bytes_per_chunk
        remaining_bytes = source_size - offset
        bytes = min([ bytes_per_chunk, remaining_bytes ])
        part_num = i + 1
        #_upload_part( bucketname, aws_key, aws_secret, mp.id, part_num,
        #              keyname, offset, bytes )  # serial version, for debugging
        pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
                                        part_num, keyname, offset, bytes] )
    pool.close()
    pool.join()

    if len( mp.get_all_parts() ) == chunk_amount:
        mp.complete_upload()
        key = bucket.get_key( keyname )
    else:
        mp.cancel_upload()

upload(default_bucket, acs_key, sec_key, 'bigfile.txt')

Stack trace:

File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 783, in __bootstrap
  self.__bootstrap_inner()
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 810, in __bootstrap_inner
  self.run()
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/threading.py", line 763, in run
  self.__target(*self.__args, **self.__kwargs)
File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/multiprocessing/pool.py", line 113, in worker
  result = (True, func(*args, **kwds))
 File "/home/desktop/s3/parup.py", line 46, in _upload_part
  _upload()
 File "/home/desktop/s3/parup.py", line 39, in _upload
  _upload( retries_left = retries_left - 1 )
 File "/home/desktop/s3/parup.py", line 39, in _upload
  _upload( retries_left = retries_left - 1 )
 File "/home/desktop/s3/parup.py", line 39, in _upload
  _upload( retries_left = retries_left - 1 )
 File "/home/desktop/s3/parup.py", line 39, in _upload
  _upload( retries_left = retries_left - 1 )
 File "/home/desktop/s3/parup.py", line 39, in _upload
  _upload( retries_left = retries_left - 1 )
 File "/home/desktop/s3/parup.py", line 42, in _upload
  raise e

Solved the problem:

The problem was the `for mp in bucket.get_all_multipart_uploads()` loop. I removed it and changed the `if mp.id == multipart_id` check to work with `multipart_id` directly. Listing a bucket's in-progress multipart uploads is a separate S3 operation (ListMultipartUploads, gated by the s3:ListBucketMultipartUploads permission), which explains the 403 even though plain reads, writes, and deletes were allowed.
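
For reference, here is a minimal sketch of the fixed `_upload_part`, assuming boto's `boto.s3.multipart.MultiPartUpload` class is used to rebuild the upload handle from the `multipart_id` the function already receives. This reconstruction is my reading of the fix described above, not necessarily the exact code:

from boto.s3.multipart import MultiPartUpload  # goes with the other imports

def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
                 keyname, offset, bytes, amount_of_retries = 5):
    """
    Uploads a part with retries, without listing multipart uploads.
    """
    def _upload(retries_left=amount_of_retries):
        try:
            print( 'Start uploading part #%d ...' % part_num )
            conn = S3Connection( aws_key, aws_secret )
            bucket = conn.get_bucket( bucketname, validate=False )
            # rebuild the upload handle from the id we already have instead of
            # calling get_all_multipart_uploads(), which needs list permission
            mp = MultiPartUpload( bucket )
            mp.key_name = keyname
            mp.id = multipart_id
            with FileChunkIO( keyname, 'r', offset=offset, bytes=bytes ) as fp:
                mp.upload_part_from_file( fp=fp, part_num=part_num )
        except Exception as e:
            print( e )
            if retries_left:
                _upload( retries_left = retries_left - 1 )
            else:
                print( 'Failed uploading part #%d' % part_num )
                raise e
        else:
            print( 'Uploaded part #%d' % part_num )

    _upload()

With the listing call gone, each worker only needs permission to upload parts to the key itself, and the `mp.id == multipart_id` comparison becomes unnecessary because the id is used as given.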