def upload_to_s3(s3key, filename, bucket, aws_key, aws_secret):
    """Upload a local file to S3 under *s3key* using a multipart upload.

    The file is split into 50 MiB parts and streamed via FileChunkIO so the
    whole file is never held in memory. On any part failure the multipart
    upload is aborted (incomplete multipart uploads otherwise linger on S3
    and accrue storage charges) and the exception is re-raised.

    Args:
        s3key: Destination key name in the bucket.
        filename: Path of the local source file.
        bucket: Name of the target S3 bucket.
        aws_key: AWS access key id.
        aws_secret: AWS secret access key.

    Returns:
        The CompleteMultiPartUpload result from boto's complete_upload().

    Raises:
        Whatever boto raises on connection, bucket lookup, or part upload.
    """
    conn = boto.connect_s3(aws_key, aws_secret)
    bucket = conn.get_bucket(bucket)

    # Get file info
    source_path = filename
    source_size = os.stat(source_path).st_size

    # Create a multipart upload request
    mp = bucket.initiate_multipart_upload(s3key)

    # Use a chunk size of 50 MiB (S3 requires >= 5 MiB for all but the
    # last part).
    chunk_size = 52428800
    # Ceiling division gives the exact number of parts; the original code
    # added 1 on top of math.ceil, which uploaded a trailing part of zero
    # (or negative) size. Guard with max(1, ...) so a 0-byte file still
    # produces one part — completing a multipart upload with no parts fails.
    chunk_count = max(1, int(math.ceil(source_size / chunk_size)))

    # Send the file parts, using FileChunkIO to create a file-like object
    # that points to a certain byte range within the original file. We
    # set bytes to never exceed the original file size.
    try:
        for i in range(chunk_count):
            print('Uploading file chunk: {0} of {1}'.format(i + 1, chunk_count))
            offset = chunk_size * i
            part_bytes = min(chunk_size, source_size - offset)
            with FileChunkIO(source_path, 'r', offset=offset,
                             bytes=part_bytes) as fp:
                mp.upload_part_from_file(fp, part_num=i + 1)
    except Exception:
        # Abort so the partially uploaded parts are discarded server-side
        # instead of silently accumulating storage costs.
        mp.cancel_upload()
        raise

    # Finish the upload
    return mp.complete_upload()