Downloading files from S3 recursively using boto in Python

I have an S3 bucket with a deep directory structure, and I would like to download everything in it at once. My keys look like this:

foo/bar/1
...
foo/bar/100

Is there a way to download these files recursively from the S3 bucket using the boto library in Python?

Thanks in advance.



Solution 1:[1]

You can download all files in a bucket like this (untested):

import logging

from boto.s3.connection import S3Connection

conn = S3Connection('your-access-key', 'your-secret-key')
bucket = conn.get_bucket('bucket')

for key in bucket.list():
    try:
        # save each object under its key name, relative to the current directory
        key.get_contents_to_filename(key.name)
    except Exception:
        logging.info(key.name + ": FAILED")

Keep in mind that "folders" in S3 are simply part of the key name; only clients render them as folders.
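As a rough illustration of that point, here is a minimal boto 2 sketch (untested, reusing the placeholder credentials and bucket from above) that lists everything under one such "folder". Each result comes back as a full, flat key string such as foo/bar/1; there is no real directory object behind foo/bar/.

from boto.s3.connection import S3Connection

conn = S3Connection('your-access-key', 'your-secret-key')
bucket = conn.get_bucket('bucket')

# listing with a prefix just filters the flat key namespace
for key in bucket.list(prefix='foo/bar/'):
    print(key.name)   # e.g. "foo/bar/1", "foo/bar/100", ...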

Solution 2:[2]

#!/usr/bin/env python

import errno
import os

import boto
from boto.exception import S3ResponseError


DOWNLOAD_LOCATION_PATH = os.path.expanduser("~") + "/s3-backup/"
if not os.path.exists(DOWNLOAD_LOCATION_PATH):
    print("Making download directory")
    os.mkdir(DOWNLOAD_LOCATION_PATH)


def backup_s3_folder():
    BUCKET_NAME = "your-bucket-name"
    AWS_ACCESS_KEY_ID = os.getenv("AWS_KEY_ID")          # set AWS_KEY_ID in your environment
    AWS_ACCESS_SECRET_KEY = os.getenv("AWS_ACCESS_KEY")  # set AWS_ACCESS_KEY in your environment
    conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_ACCESS_SECRET_KEY)
    bucket = conn.get_bucket(BUCKET_NAME)

    # go through the list of keys in the bucket
    for key in bucket.list():
        key_string = str(key.key)
        s3_path = DOWNLOAD_LOCATION_PATH + key_string
        try:
            print("Current file is", s3_path)
            key.get_contents_to_filename(s3_path)
        except (IOError, OSError, S3ResponseError):
            # the key may be a "folder" placeholder; make sure the
            # corresponding local directory exists
            if not os.path.exists(s3_path):
                try:
                    os.makedirs(s3_path)
                except OSError as exc:
                    # guard against race conditions
                    if exc.errno != errno.EEXIST:
                        raise


if __name__ == '__main__':
    backup_s3_folder()

Solution 3:[3]

import os

import boto

LOCAL_PATH = 'tmp/'

AWS_ACCESS_KEY_ID = 'YOUR_AWS_ACCESS_KEY_ID'
AWS_SECRET_ACCESS_KEY = 'YOUR_AWS_SECRET_ACCESS_KEY'
bucket_name = 'your_bucket_name'

# connect to the bucket
conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = conn.get_bucket(bucket_name)

# go through the list of keys
for key in bucket.list():
    key_string = str(key.key)
    local_path = LOCAL_PATH + key_string
    try:
        key.get_contents_to_filename(local_path)
    except (IOError, OSError):
        # the key is a "folder" placeholder -- create the matching
        # local directory tree instead
        if not os.path.exists(local_path):
            os.makedirs(local_path)  # creates directories recursively

Solution 4:[4]

Just adding the directory-creation part to @j0nes' answer:

from boto.s3.connection import S3Connection
import os

conn = S3Connection('your-access-key', 'your-secret-key')
bucket = conn.get_bucket('bucket')

for key in bucket.list():
    print(key.name)
    if key.name.endswith('/'):
        # "folder" placeholder key -- create the matching local directory
        if not os.path.exists('./' + key.name):
            os.makedirs('./' + key.name)
    else:
        key.get_contents_to_filename('./' + key.name)

This will download the files into the current directory and create directories as they are needed.
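If you are using boto3 instead of the legacy boto 2 library, the same idea looks roughly like this (a sketch, assuming boto3 can find credentials through its default provider chain and using a placeholder bucket name):

import os

import boto3

s3 = boto3.resource('s3')
bucket = s3.Bucket('bucket')  # placeholder bucket name

for obj in bucket.objects.all():
    if obj.key.endswith('/'):
        # zero-byte "folder" placeholder key
        os.makedirs('./' + obj.key, exist_ok=True)
    else:
        # create the parent directory (if any) before downloading
        parent = os.path.dirname(obj.key)
        if parent:
            os.makedirs('./' + parent, exist_ok=True)
        bucket.download_file(obj.key, './' + obj.key)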

Solution 5:[5]

If you have more than 1,000 objects under the prefix, you need a paginator to iterate through them, because a single listing call returns at most 1,000 keys:

import os

import boto3

# create the client object (S3_ACCESS_KEY / S3_SECRET_KEY are your credentials)
client = boto3.client(
    's3',
    aws_access_key_id=S3_ACCESS_KEY,
    aws_secret_access_key=S3_SECRET_KEY,
)

# bucket name and key prefix
bucket = 'bucket-name'
data_key = 'key/to/data/'

paginator = client.get_paginator("list_objects_v2")
for page in paginator.paginate(Bucket=bucket, Prefix=data_key):
    for obj in page.get('Contents', []):
        key = obj['Key']
        if key.endswith('/'):
            continue  # skip "folder" placeholder keys
        tmp_dir = os.path.dirname(key)
        if tmp_dir and not os.path.exists(tmp_dir):
            os.makedirs(tmp_dir)
        client.download_file(bucket, key, os.path.join(tmp_dir, os.path.basename(key)))
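Under the hood the paginator just keeps calling list_objects_v2 until the listing is complete. A rough hand-rolled equivalent (a sketch that only prints the keys, reusing the client, bucket, and data_key names from above) would be:

# manual pagination: keep passing the continuation token until done
kwargs = {'Bucket': bucket, 'Prefix': data_key}
while True:
    resp = client.list_objects_v2(**kwargs)
    for obj in resp.get('Contents', []):
        print(obj['Key'])
    if not resp.get('IsTruncated'):
        break
    kwargs['ContinuationToken'] = resp['NextContinuationToken']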

Solution 6:[6]

import boto
from boto.s3.key import Key

keyId = 'YOUR_AWS_ACCESS_KEY_ID'
sKeyId = 'YOUR_AWS_SECRET_ACCESS_KEY'
bucketName = 'your_bucket_name'

conn = boto.connect_s3(keyId, sKeyId)
bucket = conn.get_bucket(bucketName)

# copies objects under the "data/" and "nlu_data/" prefixes into the local
# directories model/data/ and model/nlu_data/ (which must already exist)
for key in bucket.list():
    print(">>>>> " + key.name)
    pathV = key.name.split('/')
    if pathV[0] == "data":
        if len(pathV) > 1 and pathV[1] != "":
            srcFileName = key.name
            filename = key.name.split('/')[1]
            destFileName = "model/data/" + filename
            k = Key(bucket, srcFileName)
            k.get_contents_to_filename(destFileName)
    elif pathV[0] == "nlu_data":
        if len(pathV) > 1 and pathV[1] != "":
            srcFileName = key.name
            filename = key.name.split('/')[1]
            destFileName = "model/nlu_data/" + filename
            k = Key(bucket, srcFileName)
            k.get_contents_to_filename(destFileName)

Sources

This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.

Source: Stack Overflow

Solution Source
Solution 1 j0nes
Solution 2 pitaside
Solution 3 Community
Solution 4 raghavendrap
Solution 5 Edward Kirton
Solution 6 Aakash Handa