Browse Source

add S3,archive,truncate

刘凡 2 years ago
parent
commit
9ff4d1d109
100 changed files with 3428 additions and 25 deletions
  1. .idea/misc.xml (+3 -0)
  2. Hash/EncrypC.py (+4 -4)
  3. Hash/biometry_hash.py (+1 -1)
  4. S3/NewFind/amazon-s3-find-and-forget-master/backend/ecs_tasks/delete_files/cse.py (+3 -3)
  5. S3/NewFind/amazon-s3-find-and-forget-master/templates/template.yaml (+1 -1)
  6. S3/NewFind/amazon-s3-find-and-forget-master/templates/vpc.yaml (+1 -1)
  7. S3/NewFind/amazon-s3-find-and-forget-master/tests/unit/ecs_tasks/test_cse.py (+3 -3)
  8. S3/NewFind/s3-multipart-master/s3-mp-copy.py (+1 -1)
  9. S3/NewFind/s3-multipart-master/s3-mp-download.py (+2 -2)
  10. S3/NewFind/s3-multipart-master/s3-mp-upload.py (+2 -2)
  11. S3/NewFind/s3cmd-master/S3/Config.py (+1 -1)
  12. S3/NewFind/s3cmd-master/s3cmd (+4 -4)
  13. S3/NewFind/s3tk-master/s3tk/__init__.py (+2 -2)
  14. S3/new2/AWSLambda-MICListCC-master.zip (BIN)
  15. S3/new2/AWSLambda-MICListCC-master/Lambda Function Creation.pdf (BIN)
  16. S3/new2/AWSLambda-MICListCC-master/MICListCC.json (+0 -0)
  17. S3/new2/AWSLambda-MICListCC-master/MICListCC.py (+49 -0)
  18. S3/new2/AWSLambda-MICListCC-master/MICListCC.zip (BIN)
  19. S3/new2/AWSLambda-MICListCC-master/README.md (+9 -0)
  20. S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master.zip (BIN)
  21. S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master/.gitattributes (+2 -0)
  22. S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master/1.upload file.py (+6 -0)
  23. S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master/README.md (+4 -0)
  24. S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master/test.txt (+1 -0)
  25. S3/new2/Automatic-Upload-to-AWS-S3-using-Python-main.zip (BIN)
  26. S3/new2/Automatic-Upload-to-AWS-S3-using-Python-main/README.md (+4 -0)
  27. S3/new2/Automatic-Upload-to-AWS-S3-using-Python-main/automatic_s3_uploader.py (+14 -0)
  28. S3/new2/Automatic-Upload-to-AWS-S3-using-Python-main/secrets.py (+3 -0)
  29. S3/new2/Buechner-master.zip (BIN)
  30. S3/new2/Buechner-master/.gitignore (+3 -0)
  31. S3/new2/Buechner-master/README.md (+50 -0)
  32. S3/new2/Buechner-master/buechner.py (+150 -0)
  33. S3/new2/CTFd-S3-plugin-master.zip (BIN)
  34. S3/new2/CTFd-S3-plugin-master/.gitignore (+90 -0)
  35. S3/new2/CTFd-S3-plugin-master/LICENSE (+201 -0)
  36. S3/new2/CTFd-S3-plugin-master/README.md (+23 -0)
  37. S3/new2/CTFd-S3-plugin-master/__init__.py (+1 -0)
  38. S3/new2/CTFd-S3-plugin-master/requirements.txt (+1 -0)
  39. S3/new2/CTFd-S3-plugin-master/s3.py (+91 -0)
  40. S3/new2/Compose-S3-Backup-master.zip (BIN)
  41. S3/new2/Compose-S3-Backup-master/.gitignore (+61 -0)
  42. S3/new2/Compose-S3-Backup-master/LICENSE (+21 -0)
  43. S3/new2/Compose-S3-Backup-master/README.md (+24 -0)
  44. S3/new2/Compose-S3-Backup-master/compose_s3_backup.py (+136 -0)
  45. S3/new2/Compose-S3-Backup-master/requirements.txt (+4 -0)
  46. S3/new2/HLS-Segmenter-AWS-S3-master.zip (BIN)
  47. S3/new2/HLS-Segmenter-AWS-S3-master/.gitignore (+54 -0)
  48. S3/new2/HLS-Segmenter-AWS-S3-master/LICENSE (+202 -0)
  49. S3/new2/HLS-Segmenter-AWS-S3-master/README.md (+61 -0)
  50. S3/new2/HLS-Segmenter-AWS-S3-master/config.json (+11 -0)
  51. S3/new2/HLS-Segmenter-AWS-S3-master/hls-segmenter.py (+106 -0)
  52. S3/new2/HerokuPostgres-S3-Backup-master.zip (BIN)
  53. S3/new2/HerokuPostgres-S3-Backup-master/.gitignore (+61 -0)
  54. S3/new2/HerokuPostgres-S3-Backup-master/LICENSE (+21 -0)
  55. S3/new2/HerokuPostgres-S3-Backup-master/README.md (+37 -0)
  56. S3/new2/HerokuPostgres-S3-Backup-master/herokupostgres_s3_backup.py (+108 -0)
  57. S3/new2/HerokuPostgres-S3-Backup-master/requirements.txt (+4 -0)
  58. S3/new2/ImageUploader-master.zip (BIN)
  59. S3/new2/ImageUploader-master/.gitignore (+3 -0)
  60. S3/new2/ImageUploader-master/README.md (+7 -0)
  61. S3/new2/ImageUploader-master/src/ImageUploader.py (+79 -0)
  62. S3/new2/ImageUploader-master/src/bootstrap.py (+15 -0)
  63. S3/new2/ImageUploader-master/src/images/empty (+0 -0)
  64. S3/new2/ImageUploader-master/src/s3/empty (+0 -0)
  65. S3/new2/S3-main.zip (BIN)
  66. S3/new2/S3-main/.gitignore (+20 -0)
  67. S3/new2/S3-main/requirements.txt (+10 -0)
  68. S3/new2/S3-main/s3_upload.py (+127 -0)
  69. S3/new2/S3BulkUploader-main.zip (BIN)
  70. S3/new2/S3BulkUploader-main/.gitignore (+132 -0)
  71. S3/new2/S3BulkUploader-main/Dockerfile (+10 -0)
  72. S3/new2/S3BulkUploader-main/README.md (+2 -0)
  73. S3/new2/S3BulkUploader-main/docker-compose.yaml (+7 -0)
  74. S3/new2/S3BulkUploader-main/requirements.txt (+8 -0)
  75. S3/new2/S3BulkUploader-main/s3upload.py (+135 -0)
  76. S3/new2/Upload-Data-to-Amazon-S3-master.zip (BIN)
  77. S3/new2/Upload-Data-to-Amazon-S3-master/LICENSE (+202 -0)
  78. S3/new2/Upload-Data-to-Amazon-S3-master/README.md (+37 -0)
  79. S3/new2/Upload-Data-to-Amazon-S3-master/migrate.py (+36 -0)
  80. S3/new2/Vaporfile-master.zip (BIN)
  81. S3/new2/Vaporfile-master/.gitignore (+4 -0)
  82. S3/new2/Vaporfile-master/README.markdown (+117 -0)
  83. S3/new2/Vaporfile-master/setup.py (+20 -0)
  84. S3/new2/Vaporfile-master/vaporfile/__init__.py (+1 -0)
  85. S3/new2/Vaporfile-master/vaporfile/config.py (+40 -0)
  86. S3/new2/Vaporfile-master/vaporfile/credentials.py (+56 -0)
  87. S3/new2/Vaporfile-master/vaporfile/main.py (+88 -0)
  88. S3/new2/Vaporfile-master/vaporfile/prompt_util.py (+39 -0)
  89. S3/new2/Vaporfile-master/vaporfile/s3_util.py (+42 -0)
  90. S3/new2/Vaporfile-master/vaporfile/util.py (+44 -0)
  91. S3/new2/Vaporfile-master/vaporfile/website.py (+300 -0)
  92. S3/new2/amazon_s3_backup-master.zip (BIN)
  93. S3/new2/amazon_s3_backup-master/Amazon S3 Backup.py (+38 -0)
  94. S3/new2/appengine-s3-upload-master.zip (BIN)
  95. S3/new2/appengine-s3-upload-master/app.yaml (+8 -0)
  96. S3/new2/appengine-s3-upload-master/upload.html (+22 -0)
  97. S3/new2/appengine-s3-upload-master/upload.py (+69 -0)
  98. S3/new2/aws_s3-master.zip (BIN)
  99. S3/new2/aws_s3-master/README.md (+4 -0)
  100. S3/new2/aws_s3-master/s3_access.py (+65 -0)

+ 3 - 0
.idea/misc.xml

@@ -1,4 +1,7 @@
 <?xml version="1.0" encoding="UTF-8"?>
 <project version="4">
   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.9 (base) (2)" project-jdk-type="Python SDK" />
+  <component name="PyCharmProfessionalAdvertiser">
+    <option name="shown" value="true" />
+  </component>
 </project>

+ 4 - 4
Hash/EncrypC.py

@@ -548,8 +548,8 @@ Please Enter an Encrypted File to Decrypt.""",
 
             self._cipher = None
             self.should_cancel = False
-            self._status.set("File Encryption Successful !!")
-            messagebox.showinfo("EncrypC", "File Encryption Successful !!")
+            self._status.set("File Hash Successful !!")
+            messagebox.showinfo("EncrypC", "File Hash Successful !!")
         except Exception as e:
 
             self._status.set(e)
@@ -609,14 +609,14 @@ Please Enter an Encrypted File to Decrypt.""",
             """1. Open the Application and Click SELECT FILE Button to select your file e.g. "mydoc.pdf" (OR You can add path manually).
 2. Enter your Key (This should be alphanumeric letters). Remember this so you can Decrypt the file later. (Else you'll lose your file permanently)
 3. Click ENCRYPT Button to encrypt the file. A new encrypted file with ".encr" extention e.g. "mydoc.pdf.encr" will be created in the same directory where the "mydoc.pdf" is.
-4. When you want to Decrypt a file you, will select the file with the ".encr" extention and Enter your Key which you chose at the time of Encryption. Click DECRYPT Button to decrypt. The decrypted file will be of the same name as before with the suffix "decrypted" for e.g. "mydoc_decrypted.pdf".
+4. When you want to Decrypt a file you, will select the file with the ".encr" extention and Enter your Key which you chose at the time of Hash. Click DECRYPT Button to decrypt. The decrypted file will be of the same name as before with the suffix "decrypted" for e.g. "mydoc_decrypted.pdf".
 5. Click CLEAR Button to reset the input fields and status bar.""",
         )
 
     def show_about(self):
         messagebox.showinfo(
             "EncrypC v1.2.0",
-            """EncrypC is a File Encryption Tool based on AES Algorithm. 
+            """EncrypC is a File Hash Tool based on AES Algorithm. 
 Managed by Dhruv Panchal.
 https://github.com/dhhruv""",
         )

+ 1 - 1
Hash/biometry_hash.py

@@ -293,7 +293,7 @@ def AES_full(passhash):
    
 
     if choice1 == "1":
-        print ("\nEncryption/Decryption")
+        print ("\nHash/Decryption")
         AESmenu(EncDec)
 
     if choice1 == "2":

+ 3 - 3
S3/NewFind/amazon-s3-find-and-forget-master/backend/ecs_tasks/delete_files/cse.py

@@ -27,12 +27,12 @@ HEADER_WRAP_ALG = "x-amz-wrap-alg"
 def is_kms_cse_encrypted(s3_metadata):
     if HEADER_KEY in s3_metadata:
         if s3_metadata.get(HEADER_WRAP_ALG, None) != "kms":
-            raise ValueError("Unsupported Encryption strategy")
+            raise ValueError("Unsupported Hash strategy")
         if s3_metadata.get(HEADER_ALG, None) not in [ALG_CBC, ALG_GCM]:
-            raise ValueError("Unsupported Encryption algorithm")
+            raise ValueError("Unsupported Hash algorithm")
         return True
     elif "x-amz-key" in s3_metadata:
-        raise ValueError("Unsupported Amazon S3 Encryption Client Version")
+        raise ValueError("Unsupported Amazon S3 Hash Client Version")
     return False
 
 

+ 1 - 1
S3/NewFind/amazon-s3-find-and-forget-master/templates/template.yaml

@@ -115,7 +115,7 @@ Parameters:
     Type: Number
     Default: 0
   KMSKeyArns:
-    Description: Comma-delimited list of KMS Key Arns used for client-side Encryption. Leave empty if data is not client-side encrypted with KMS
+    Description: Comma-delimited list of KMS Key Arns used for client-side Hash. Leave empty if data is not client-side encrypted with KMS
     Type: String
     Default: ""
   PreBuiltArtefactsBucketOverride:

+ 1 - 1
S3/NewFind/amazon-s3-find-and-forget-master/templates/vpc.yaml

@@ -16,7 +16,7 @@ Parameters:
     Type: String
     Default: ""
   KMSKeyArns:
-    Description: Comma-delimited list of KMS Key Id Arns used for Client-side Encryption. Leave list empty if data is not encrypted with CSE-KMS
+    Description: Comma-delimited list of KMS Key Id Arns used for Client-side Hash. Leave list empty if data is not encrypted with CSE-KMS
     Type: String
     Default: ""
   PrivateSubnetIpBlocks:

+ 3 - 3
S3/NewFind/amazon-s3-find-and-forget-master/tests/unit/ecs_tasks/test_cse.py

@@ -72,7 +72,7 @@ def test_it_throws_exception_for_encryption_sdk_v1():
     }
     with pytest.raises(ValueError) as e:
         is_kms_cse_encrypted(old_sdk)
-    assert e.value.args[0] == "Unsupported Amazon S3 Encryption Client Version"
+    assert e.value.args[0] == "Unsupported Amazon S3 Hash Client Version"
 
 
 def test_it_throws_exception_for_unsupported_encryption_algorithm():
@@ -83,14 +83,14 @@ def test_it_throws_exception_for_unsupported_encryption_algorithm():
     }
     with pytest.raises(ValueError) as e:
         is_kms_cse_encrypted(invalid_algorithm)
-    assert e.value.args[0] == "Unsupported Encryption algorithm"
+    assert e.value.args[0] == "Unsupported Hash algorithm"
 
 
 def test_it_throws_exception_for_unsupported_encryption_strategy():
     not_kms = {"x-amz-key-v2": "key", "x-amz-cek-alg": "AES/CBC/PKCS5Padding"}
     with pytest.raises(ValueError) as e:
         is_kms_cse_encrypted(not_kms)
-    assert e.value.args[0] == "Unsupported Encryption strategy"
+    assert e.value.args[0] == "Unsupported Hash strategy"
 
 
 def test_it_encrypts_and_decrypts_data_cbc():

+ 1 - 1
S3/NewFind/s3-multipart-master/s3-mp-copy.py

@@ -139,7 +139,7 @@ def main(src, dest, num_processes=2, split=50, force=False, reduced_redundancy=F
         logger.warn("Received KeyboardInterrupt, canceling copy")
         pool.terminate()
         mpu.cancel_upload()
-    except Exception, err:
+    except Exception as err:
         logger.error("Encountered an error, canceling copy")
         logger.error(err)
         mpu.cancel_upload()

+ 2 - 2
S3/NewFind/s3-multipart-master/s3-mp-download.py

@@ -72,7 +72,7 @@ def do_part_download(args):
         os.close(fd)
         s = s / 1024 / 1024.
         logger.debug("Downloaded %0.2fM in %0.2fs at %0.2fMBps" % (s, t2, s/t2))
-    except Exception, err:
+    except Exception as err:
         logger.debug("Retry request %d of max %d times" % (current_tries, max_tries))
         if (current_tries > max_tries):
             logger.error(err)
@@ -155,7 +155,7 @@ def main(src, dest, num_processes=2, split=32, force=False, verbose=False, quiet
                     (s, t2, s/t2))
         except KeyboardInterrupt:
             logger.warning("User terminated")
-        except Exception, err:
+        except Exception as err:
             logger.error(err)
 
 if __name__ == "__main__":

+ 2 - 2
S3/NewFind/s3-multipart-master/s3-mp-upload.py

@@ -80,7 +80,7 @@ def do_part_upload(args):
         t2 = time.time() - t1
         s = len(data)/1024./1024.
         logger.info("Uploaded part %s (%0.2fM) in %0.2fs at %0.2fMBps" % (i+1, s, t2, s/t2))
-    except Exception, err:
+    except Exception as err:
         logger.debug("Retry request %d of max %d times" % (current_tries, max_tries))
         if (current_tries > max_tries):
             logger.error(err)
@@ -158,7 +158,7 @@ def main(src, dest, num_processes=2, split=50, force=False, reduced_redundancy=F
         logger.warn("Received KeyboardInterrupt, canceling upload")
         pool.terminate()
         mpu.cancel_upload()
-    except Exception, err:
+    except Exception as err:
         logger.error("Encountered an error, canceling upload")
         logger.error(err)
         mpu.cancel_upload()

+ 1 - 1
S3/NewFind/s3cmd-master/S3/Config.py

@@ -117,7 +117,7 @@ class Config(object):
     _access_token_last_update = None
     host_base = u"s3.amazonaws.com"
     host_bucket = u"%(bucket)s.s3.amazonaws.com"
-    kms_key = u""    #can't set this and Server Side Encryption at the same time
+    kms_key = u""    #can't set this and Server Side Hash at the same time
     # simpledb_host looks useless, legacy? to remove?
     simpledb_host = u"sdb.amazonaws.com"
     cloudfront_host = u"cloudfront.amazonaws.com"

+ 4 - 4
S3/NewFind/s3cmd-master/s3cmd

@@ -2431,7 +2431,7 @@ def run_configure(config_file, args):
         ("bucket_location", "Default Region"),
         ("host_base", "S3 Endpoint", "Use \"s3.amazonaws.com\" for S3 Endpoint and not modify it to the target Amazon S3."),
         ("host_bucket", "DNS-style bucket+hostname:port template for accessing a bucket", "Use \"%(bucket)s.s3.amazonaws.com\" to the target Amazon S3. \"%(bucket)s\" and \"%(location)s\" vars can be used\nif the target S3 system supports dns based buckets."),
-        ("gpg_passphrase", "Encryption password", "Encryption password is used to protect your files from reading\nby unauthorized persons while in transfer to S3"),
+        ("gpg_passphrase", "Hash password", "Hash password is used to protect your files from reading\nby unauthorized persons while in transfer to S3"),
         ("gpg_command", "Path to GPG program"),
         ("use_https", "Use HTTPS protocol", "When using secure HTTPS protocol all communication with Amazon S3\nservers is protected from 3rd party eavesdropping. This method is\nslower than plain HTTP, and can only be proxied with Python 2.7 or newer"),
         ("proxy_host", "HTTP Proxy server name", "On some networks all internet access must go through a HTTP proxy.\nTry setting it here if you can't connect to S3 directly"),
@@ -2535,9 +2535,9 @@ def run_configure(config_file, args):
                         os.unlink(deunicodise(ret_enc[1]))
                         os.unlink(deunicodise(ret_dec[1]))
                         if hash[0] == hash[2] and hash[0] != hash[1]:
-                            output(u"Success. Encryption and decryption worked fine :-)")
+                            output(u"Success. Hash and decryption worked fine :-)")
                         else:
-                            raise Exception("Encryption verification error.")
+                            raise Exception("Hash verification error.")
 
                 except S3Error as e:
                     error(u"Test failed: %s" % (e))
@@ -3138,7 +3138,7 @@ def main():
     socket.setdefaulttimeout(cfg.socket_timeout)
 
     if cfg.encrypt and cfg.gpg_passphrase == "":
-        error(u"Encryption requested but no passphrase set in config file.")
+        error(u"Hash requested but no passphrase set in config file.")
         error(u"Please re-run 's3cmd --configure' and supply it.")
         sys.exit(EX_CONFIG)
 

+ 2 - 2
S3/NewFind/s3tk-master/s3tk/__init__.py

@@ -322,7 +322,7 @@ def no_uploads_statement(bucket):
 
 def encryption_statement(bucket):
     return OrderedDict([
-        ('Sid', 'Encryption'),
+        ('Sid', 'Hash'),
         ('Effect', 'Deny'),
         ('Principal', '*'),
         ('Action', 's3:PutObject'),
@@ -599,7 +599,7 @@ def list_policy(buckets, named=False):
                         elif statement_matches(statement, no_uploads):
                             named_statement = 'No uploads'
                         elif statement_matches(statement, encryption):
-                            named_statement = 'Encryption'
+                            named_statement = 'Hash'
                         else:
                             named_statement = 'Custom'
 

BIN
S3/new2/AWSLambda-MICListCC-master.zip


BIN
S3/new2/AWSLambda-MICListCC-master/Lambda Function Creation.pdf


File diff not shown because the file is too large
+ 0 - 0
S3/new2/AWSLambda-MICListCC-master/MICListCC.json


+ 49 - 0
S3/new2/AWSLambda-MICListCC-master/MICListCC.py

@@ -0,0 +1,49 @@
+import pandas as pd
+import json
+import numpy as np
+import requests
+import boto3
+
+
+def lambda_handler(event, context):
+    try:
+        excelURL = 'https://www.iso20022.org/sites/default/files/ISO10383_MIC/ISO10383_MIC.xls'
+        imageRequest = requests.get(excelURL)  # create HTTP response object
+        fileName = 'ISO10383_MIC_Test.xls'  # File Name to be saved as
+
+        with open('/tmp/' + fileName, 'wb') as f:
+            f.write(imageRequest.content)
+
+        # Read 'MICs List by CC' sheet using Pandas library and create Dataframe
+        # Store in /tmp folder
+        fileLoad = pd.ExcelFile('/tmp/' + fileName)
+        dfMIClistCC = fileLoad.parse('MICs List by CC')
+
+        # Replace Nan from Dataframe and convert to empty string
+        dfMIClistCC = dfMIClistCC.replace(np.nan, '')
+
+        # Convert Panda Dataframe to Dictionary
+        dfMIClistCC_dict = dfMIClistCC.to_dict('records')
+
+        # Convert Dictionary to JSON
+        dfMIClistCC_str = json.dumps(dfMIClistCC_dict)
+        dfMIClistCC_json = json.loads(dfMIClistCC_str)
+
+        # Save JSON file
+        # Store in /tmp folder
+        with open('/tmp/dfMIClistCC.json', 'w') as f:
+            json.dump(dfMIClistCC_json, f)
+
+        # Upload JSON file to S3 bucket
+        data = open('/tmp/dfMIClistCC.json', 'rb')
+        s3 = boto3.client('s3')
+        s3.put_object(Bucket='bucket-miclistcc',
+                      Key='MICListCC.json',
+                      Body=data)
+        return 'File uploaded successfully'
+    except ConnectionError:
+        print('Content does not exist')
+    except FileNotFoundError:
+        print('File not found')
+    except Exception as e:
+        print('Error occured: ' + str(e))

BIN
S3/new2/AWSLambda-MICListCC-master/MICListCC.zip


+ 9 - 0
S3/new2/AWSLambda-MICListCC-master/README.md

@@ -0,0 +1,9 @@
+## AWS Lamda - MICListCC
+# Requirement:
+
+1. Download the xlsx
+2. Store the xlsx
+3. Read the tab titled "MICs List by CC"
+4. Create a list of dict containing all rows (except row 1). The values in row 1 would be the keys for in each dict.
+5. Store the list from step 4) as a .json file in an AWS S3 bucket
+6. The above function should be run as an AWS Lambda

BIN
S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master.zip


+ 2 - 0
S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master/.gitattributes

@@ -0,0 +1,2 @@
+# Auto detect text files and perform LF normalization
+* text=auto

+ 6 - 0
S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master/1.upload file.py

@@ -0,0 +1,6 @@
+import boto3
+s3 = boto3.resource('s3')
+for bucket in s3.buckets.all():
+    print(bucket.name)
+data = open('test.txt', 'rb')
+s3.Bucket('myfirstbucketkb').put_object(Key='test.txt', Body=data)

+ 4 - 0
S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master/README.md

@@ -0,0 +1,4 @@
+# Amazon-Web-service---Upload-file-to-S3-bucket
+
+
+This python code upload a test file to already create S3 bucket (In this case myfirstbucketkb). 

+ 1 - 0
S3/new2/Amazon-Web-service---Upload-file-to-S3-bucket-master/test.txt

@@ -0,0 +1 @@
+test file

BIN
S3/new2/Automatic-Upload-to-AWS-S3-using-Python-main.zip


+ 4 - 0
S3/new2/Automatic-Upload-to-AWS-S3-using-Python-main/README.md

@@ -0,0 +1,4 @@
+# Automate-Upload-to-AWS-S3-using-Python
+Automatically upload files to Amazon Web Services S3 using python and AWS S3 API.
+Instead of uploading each and every file to S3 bucket manually we can automate that process with these two files.
+

+ 14 - 0
S3/new2/Automatic-Upload-to-AWS-S3-using-Python-main/automatic_s3_uploader.py

@@ -0,0 +1,14 @@
+from secrets import access_key, secret_access_key
+
+import boto3
+import os
+
+client = boto3.client('s3',aws_access_key_id = access_key,
+                            aws_secret_access_key = secret_access_key)
+
+
+for file in os.listdir():
+    if '.py' in file:
+        upload_file_bucket = 'my-proj-bucket'
+        upload_file_key = 'python/' +str(file)
+        client.upload_file(file, upload_file_bucket, upload_file_key)

+ 3 - 0
S3/new2/Automatic-Upload-to-AWS-S3-using-Python-main/secrets.py

@@ -0,0 +1,3 @@
+
+access_key = 'Your key for user!!'
+secret_access_key = 'Your secret key for user'

BIN
S3/new2/Buechner-master.zip


+ 3 - 0
S3/new2/Buechner-master/.gitignore

@@ -0,0 +1,3 @@
+*.py[co]
+aws_config.py
+venv

+ 50 - 0
S3/new2/Buechner-master/README.md

@@ -0,0 +1,50 @@
+# Buechner
+
+Upload your Flask static files to Amazon S3
+
+## Current Status
+
+I have no plans to work on this project any further. There are most likely
+better things out there anyway. Go use something else :)
+
+## What's it do?
+
+Buechner leverages [Boto](https://github.com/boto/boto) to let you easily push
+static files to S3. It doesn't require Flask, that's just what I use it for.
+
+Configuration is done by environment variables or by a file. This makes it
+easy to use Buechner to throw static files at your S3 bucket through a git
+hook from Heroku.
+
+It will only transfer files that are newer than their counterparts on S3. It
+won't delete anything, only overwrite.
+
+It will set the entire bucket's ACL to public read, as well as each key (file)
+that it uploads
+
+## Usage
+
+Drop buechner.py into your main project directory. It assumes your static
+dir is at src/static relative to its own directory, but you can change that by
+adding BUECHNER_STATIC_RELPATH to your environment. Note that this path is
+defined with respect to Buechner's file path.
+
+Set up your environment or a file 'aws_config.py' like so:
+
+    AWS_S3_BUCKET = 'my_s3_bucket_name'
+    AWS_ACCESS_KEY_ID = 'SPAM'
+    AWS_SECRET_ACCESS_KEY = 'SECRETEGGS'
+
+And then run `python buechner.py`. It will ask you to continue after confirming
+the bucket name and static directory path.
+
+## Requirements
+
+*  [Boto](https://github.com/boto/boto)
+*  Python 2.6 or above. Does not support Python 3.
+*  Unix of some sort. The local file discovery might work on Windows, but I
+   haven't tried.
+
+## Etc
+
+Create an issue or shoot me a pull request if you have the need.
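
Not part of this commit: a rough sketch of the environment-variable route the Buechner README above describes (all values are placeholders), setting the same configuration programmatically before running the script:

```python
# Hypothetical helper, not included in Buechner: configure it via environment
# variables (the alternative to aws_config.py described in the README) and run it.
import os
import subprocess

os.environ["AWS_S3_BUCKET"] = "my_s3_bucket_name"           # placeholder bucket name
os.environ["AWS_ACCESS_KEY_ID"] = "SPAM"                    # placeholder key id
os.environ["AWS_SECRET_ACCESS_KEY"] = "SECRETEGGS"          # placeholder secret
os.environ["BUECHNER_STATIC_RELPATH"] = os.path.join("src", "static")  # the default layout

# buechner.py itself targets Python 2 (raw_input, iteritems, os.path.walk),
# so it is invoked as a subprocess rather than imported.
subprocess.call(["python", "buechner.py"])
```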

+ 150 - 0
S3/new2/Buechner-master/buechner.py

@@ -0,0 +1,150 @@
+#!/usr/bin/env python
+import os
+import sys
+from datetime import datetime
+
+from boto.s3.connection import S3Connection
+from boto.s3.key import Key
+
+# Relative path from directory of buechner.py. Default to src/static,
+# but script will attempt to pull from BUECHNER_STATIC_RELPATH env var.
+try:
+    STATIC_DIR_REL = os.environ['BUECHNER_STATIC_RELPATH']
+except KeyError:
+    STATIC_DIR_REL = os.path.join(
+        'src',
+        'static')
+
+
+def init_s3_interface(s3_bucket, access_key_id, secret_access_key):
+    """
+    Initialize the interface.
+
+    Arguments are all strings: bucket name, key id, and secret key,
+    respectively. Returns a list of boto.s3.connection.S3Connection,
+    boto.s3.bucket.Bucket.
+
+    """
+    conn = S3Connection(access_key_id, secret_access_key)
+    bucket = conn.get_bucket(s3_bucket)
+    return conn, bucket
+
+
+def get_keys_from_directory(basedir):
+    """
+    Return dict of paths -> mtimes of files found recursively under `basedir`.
+
+    Paths are relative to `basedir` with no leading slashes. Mtimes are
+    datetime.datetime objects in UTC. Will not follow directory symlinks.
+
+    Note: this will probably only work right on Unix, unless os.path.getmtime
+    gives UTC on other platforms too.
+
+    """
+    results = []
+    # Fill up results with base, namelist
+    os.path.walk(
+        basedir,
+        (lambda x, y, z: results.append([y, z])),
+        None)
+    files = dict()
+    for base, names in results:
+        for name in names:
+            fullpath = os.path.join(base, name)
+            # only care about files
+            if os.path.isfile(fullpath):
+                mtime = datetime.utcfromtimestamp(os.path.getmtime(fullpath))
+                relative_path = fullpath.replace(
+                    basedir, '').lstrip(os.path.sep)
+                files[relative_path] = mtime
+    return files
+
+
+def upload_new_files(staticdir, bucket):
+    """
+    Upload newer files recursively under `staticdir` to `bucket`.
+
+    This assumes that the directory `staticdir` represents the root of
+    the S3 bucket. `bucket` should be an instance of boto.s3.bucket.Bucket.
+
+    Return a list of the files uploaded, with paths relative to `staticdir`.
+
+    """
+    allkeys = bucket.list()
+    local_files_mtimes = get_keys_from_directory(staticdir)
+    # `fmt` should be ISO 8601, but the time zone isn't parsed right when
+    # given as %Z, so we hack it off below. Hopefully it's always Zulu time
+    fmt = '%Y-%m-%dT%H:%M:%S.%f'
+    # This is a dict of key_name -> [key_obj, key.last_modified]
+    remote_files_mtimes_keys = dict(
+        (
+            k.name,
+            [
+                k,
+                datetime.strptime(
+                    k.last_modified[:-1],  # strip off Z at end
+                    fmt)
+            ]
+        ) for k in allkeys)
+    uploaded_files = []
+    for filepath, local_mtime in local_files_mtimes.iteritems():
+        if filepath in remote_files_mtimes_keys:
+            the_key, remote_mtime = remote_files_mtimes_keys[filepath]
+            # Skip file if local is older
+            if remote_mtime > local_mtime:
+                continue
+        else:
+            the_key = Key(bucket)
+            the_key.key = filepath
+
+        uploaded_files.append(filepath)
+        the_key.set_contents_from_filename(os.path.join(staticdir, filepath))
+        the_key.set_acl('public-read')
+    return uploaded_files
+
+
+if __name__ == '__main__':
+    # If no AWS keys are found in environment, try to import the config file
+    try:
+        AWS_S3_BUCKET = os.environ['AWS_S3_BUCKET']
+        AWS_ACCESS_KEY_ID = os.environ['AWS_ACCESS_KEY_ID']
+        AWS_SECRET_ACCESS_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
+        print("Using environment config. Loading bucket '%s'" % (
+            AWS_S3_BUCKET))
+    except KeyError:
+        print(
+            'Failed to find all environment variables, attempting to load '
+            'aws_config.py...')
+        try:
+            import aws_config
+
+            AWS_S3_BUCKET = aws_config.AWS_S3_BUCKET
+            AWS_ACCESS_KEY_ID = aws_config.AWS_ACCESS_KEY_ID
+            AWS_SECRET_ACCESS_KEY = aws_config.AWS_SECRET_ACCESS_KEY
+            print("Using aws_config.py config. Loading bucket '%s'" % (
+                AWS_S3_BUCKET))
+        except (ImportError, NameError, AttributeError) as e:
+            print('Failed to locate AWS config, check environment or config')
+            print("Error was: '%s'" % e)
+            sys.exit(1)
+
+    staticdir = os.path.join(
+        os.path.dirname(os.path.realpath(__file__)),
+        STATIC_DIR_REL)
+    print("Will upload new files from '%s' to bucket '%s'." % (
+        staticdir,
+        AWS_S3_BUCKET))
+    if raw_input('Continue? [Y]').strip().lower() != 'y':
+        print("Exiting...")
+        sys.exit(1)
+    print("Preparing upload...")
+    conn, bucket = init_s3_interface(
+        AWS_S3_BUCKET,
+        AWS_ACCESS_KEY_ID,
+        AWS_SECRET_ACCESS_KEY)
+    uploaded = upload_new_files(staticdir, bucket)
+    for filename in uploaded:
+        print("Uploaded '%s' to S3" % filename)
+    print("Setting public read ACL on bucket '%s'..." % AWS_S3_BUCKET)
+    bucket.set_acl('public-read')
+    print("Complete!")

BIN
S3/new2/CTFd-S3-plugin-master.zip


+ 90 - 0
S3/new2/CTFd-S3-plugin-master/.gitignore

@@ -0,0 +1,90 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+.hypothesis/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# IPython Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# dotenv
+.env
+
+# virtualenv
+venv/
+ENV/
+
+# Spyder project settings
+.spyderproject
+
+# Rope project settings
+.ropeproject
+.idea/

+ 201 - 0
S3/new2/CTFd-S3-plugin-master/LICENSE

@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.

+ 23 - 0
S3/new2/CTFd-S3-plugin-master/README.md

@@ -0,0 +1,23 @@
+# CTFd-S3-plugin
+Plugin that converts CTFd file uploads and deletions to Amazon S3 calls.
+
+AWS S3 support has been integrated into CTFd as of version 2.0. 
+
+## Installation
+
+1. To install clone this repository to the [CTFd/plugins](https://github.com/isislab/CTFd/tree/master/CTFd/plugins) folder.
+2. Install the requirements specified in the [requirements.txt](https://github.com/CTFd/CTFd-S3-plugin/blob/master/requirements.txt) file. 
+3. Edit [CTFd/config.py](https://github.com/isislab/CTFd/blob/master/CTFd/config.py) and add the following entries:
+  * ACCESS_KEY_ID
+  * SECRET_ACCESS_KEY
+  * BUCKET 
+
+`ACCESS_KEY_ID` is your AWS Access Key. If you do not provide this, the plugin will try to use an IAM role or credentials file.
+
+`SECRET_ACCESS_KEY` is your AWS Secret Key. If you do not provide this, the plugin will try to use an IAM role or credentials file.
+
+`BUCKET` is the name of your Amazon S3 bucket. 
+
+## Note
+
+This plugin will not yet backfill any files you've uploaded. If you install the plugin after you've uploaded files, you will need to upload your current challenge files to S3. 
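
For illustration only (not part of this commit, and exact placement varies by CTFd version), the three settings named in the README above would be added to CTFd/config.py as plain string values, along the lines of:

```python
# Hypothetical entries for CTFd/config.py -- all values are placeholders.
ACCESS_KEY_ID = "AKIAXXXXXXXXXXXXXXXX"      # AWS Access Key; leave unset to fall back to an IAM role / credentials file
SECRET_ACCESS_KEY = "your-aws-secret-key"   # AWS Secret Key; same fallback behaviour
BUCKET = "my-ctfd-files"                    # name of the S3 bucket the plugin uploads to
```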

+ 1 - 0
S3/new2/CTFd-S3-plugin-master/__init__.py

@@ -0,0 +1 @@
+from .s3 import load

+ 1 - 0
S3/new2/CTFd-S3-plugin-master/requirements.txt

@@ -0,0 +1 @@
+boto3

+ 91 - 0
S3/new2/CTFd-S3-plugin-master/s3.py

@@ -0,0 +1,91 @@
+from flask import request, redirect, abort, jsonify, url_for
+from CTFd.models import db, Solves, Challenges, WrongKeys, Keys, Tags, Files
+
+from CTFd import utils
+import os
+import boto3
+import hashlib
+import string
+from werkzeug.utils import secure_filename
+
+
+def clean_filename(c):
+    if c in string.ascii_letters + string.digits + '-' + '_' + '.':
+        return True
+
+
+def get_s3_conn(app):
+    access_key_id = utils.get_app_config('ACCESS_KEY_ID')
+    secret_access_key = utils.get_app_config('SECRET_ACCESS_KEY')
+    if access_key_id and secret_access_key:
+        client = boto3.client(
+            's3',
+            aws_access_key_id=access_key_id,
+            aws_secret_access_key=secret_access_key
+        )
+        bucket = utils.get_app_config('BUCKET')
+        return client, bucket
+    else:
+        client = boto3.client('s3')
+        bucket = utils.get_app_config('BUCKET')
+        return client, bucket
+
+
+def load(app):
+    def upload_file(file, chalid):
+        s3, bucket = get_s3_conn(app)
+
+        filename = filter(clean_filename, secure_filename(file.filename).replace(' ', '_'))
+        if len(filename) <= 0:
+            return False
+
+        md5hash = hashlib.md5(os.urandom(64)).hexdigest()
+
+        key = md5hash + '/' + filename
+        s3.upload_fileobj(file, bucket, key)
+
+        db_f = Files(chalid, key)
+        db.session.add(db_f)
+        db.session.commit()
+        return db_f.id, (md5hash + '/' + filename)
+
+    def delete_file(filename):
+        s3, bucket = get_s3_conn(app)
+        f = Files.query.filter_by(id=filename).first_or_404()
+        key = f.location
+        s3.delete_object(Bucket=bucket, Key=key)
+        db.session.delete(f)
+        db.session.commit()
+        return True
+
+    def file_handler(path):
+        f = Files.query.filter_by(location=path).first_or_404()
+        chal = Challenges.query.filter_by(id=f.chal).first()
+
+        s3, bucket = get_s3_conn(app)
+        if utils.is_admin() or chal is None:
+            key = f.location
+            url = s3.generate_presigned_url('get_object', Params = {
+                'Bucket': bucket,
+                'Key': key, })
+            return redirect(url)
+
+        if utils.user_can_view_challenges():
+            if not utils.ctftime():
+                if not utils.view_after_ctf():
+                    abort(403)
+
+            if chal.hidden:
+                abort(403)
+
+            key = f.location
+            url = s3.generate_presigned_url('get_object', Params = {
+                'Bucket': bucket,
+                'Key': key, })
+            return redirect(url)
+        else:
+            return redirect(url_for('auth.login'))
+
+    utils.upload_file = upload_file
+    utils.delete_file = delete_file
+    app.view_functions['views.file_handler'] = file_handler

BIN
S3/new2/Compose-S3-Backup-master.zip


+ 61 - 0
S3/new2/Compose-S3-Backup-master/.gitignore

@@ -0,0 +1,61 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+bin/
+include/
+*.egg-info/
+.installed.cfg
+*.egg
+*.tar.gz
+
+# PyInstaller
+#  Usually these files are written by a python script from a template 
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.cache
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+#IDE
+.idea/
+*.swp
+

+ 21 - 0
S3/new2/Compose-S3-Backup-master/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Fluke Networks
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 24 - 0
S3/new2/Compose-S3-Backup-master/README.md

@@ -0,0 +1,24 @@
+Compose-S3-Backup
+=================
+
+*Early Stage Software - unstable and unrefined... for now!*
+
+Download MongoDB backups from the [Compose](http://www.compose.io) API (formerly MongoHQ) and upload to Amazon S3
+
+##Usage
+Install Requirements
+
+```bash
+$ pip install -r requirements.txt
+
+```
+
+View help
+
+```bash
+$ compose_s3_backup.py --help
+
+```
+
+##Contributing
+Pull requests and suggestions welcome!

+ 136 - 0
S3/new2/Compose-S3-Backup-master/compose_s3_backup.py

@@ -0,0 +1,136 @@
+#! /usr/bin/env python
+"""Compose API Backup to S3 Utility.
+Uses the Compose (MongoHQ) API to pull the latest backup for a database, and put the file on Amazon S3
+
+Usage:
+mongoHQ_S3_backup.py -d <database_name> -t <oauthToken> -a <account_name> -b <bucket> -k <aws_key_id> -s <aws_secret> -p <s3_key_prefix>
+mongoHQ_S3_backup.py (-h | --help)
+
+Options:
+-h --help      Show this screen.
+-d <database_name> --database=<database_name>  Name of the database to find a backup for, or deployment name.
+-t <oauth_token> --token=<oauth_token>         MongoHQ OAUTH Token
+-a <account_name> --account=<account_name>     MongoHQ Account Name
+-b <bucket> --bucket=<bucket>                  S3 Bucket name
+-k <aws_key_id> --awskey=<aws_key_id>          AWS Key ID
+-s <aws_secret> --awssecret=<aws_secret>       AWS Secret Key
+-p <s3_key_prefix> --prefix=<s3_key_prefix     Prefixes filename of S3 object [default: '']
+"""
+import requests
+import math
+import os
+import sys
+from docopt import docopt
+import boto
+from filechunkio import FileChunkIO
+
+
+# Compose/MongoHQ API docs
+# http://support.mongohq.com/rest-api/2014-06/backups.html
+
+
+# Gets the latest backup for a given database and account.
+def get_backup(database_name, account_name, oauth_token):
+    mongohq_url = 'https://api.mongohq.com/accounts/{0}/backups'.format(account_name)
+    headers = {'Accept-Version': '2014-06', 'Content-Type': 'application/json',
+               'Authorization': 'Bearer {0}'.format(oauth_token)}
+    # get the list of backups for our account.
+    r = requests.get(mongohq_url, headers=headers)
+
+    if r.status_code != 200:
+        print('Unable to list backups!')
+        return None
+
+    all_backups = r.json()
+    backups_for_this_database = list()
+    for backup in all_backups:
+        if database_name in backup['database_names'] or database_name == backup['deployment']:
+            backups_for_this_database.append(
+                {'id': backup['id'], 'created_at': backup['created_at'], 'filename': backup['filename']})
+
+    if len(backups_for_this_database) == 0:
+        print('No Backups found for database name:{0}. Exiting...'.format(database_name))
+        sys.exit(1)
+
+    # search for the latest backup for the given database name
+    latest = sorted(backups_for_this_database, key=lambda k: k['created_at'])[-1]
+    print('The latest backup for {0} is: {1} created at {2}'.format(database_name, latest['id'], latest['created_at']))
+    backup_filename = latest['filename']
+
+    # pull down the backup
+    r2 = requests.get('{0}/{1}/download'.format(mongohq_url, latest['id']), headers=headers, allow_redirects=False)
+    if r2.status_code != 302:
+        return None
+    # MongoHQ backup API redirects to a URL where the backup file can be downloaded.
+    # TODO: Can the 302 be followed in one step?
+    file_location = r2.headers['location']
+
+    # download the file to disk. Stream, since the file could potentially be large
+    print('Downloading Backup from:{0}'.format(file_location))
+    r3 = requests.get(file_location, stream=True)
+    with open(backup_filename, 'wb') as f:
+        for chunk in r3.iter_content(chunk_size=1024):
+            if chunk:  # filter out keep-alive new chunks
+                f.write(chunk)
+                f.flush()
+    print('saved backup to file: {0}'.format(backup_filename))
+    return backup_filename
+
+
+# Using S3 Multipart upload to handle potentially large files
+def upload_to_s3(s3key, filename, bucket, aws_key, aws_secret):
+    conn = boto.connect_s3(aws_key, aws_secret)
+    bucket = conn.get_bucket(bucket)
+    # Get file info
+    source_path = filename
+    source_size = os.stat(source_path).st_size
+    # Create a multipart upload request
+    mp = bucket.initiate_multipart_upload(s3key)
+    # Use a chunk size of 50 MiB
+    chunk_size = 52428800
+    chunk_count = int(math.ceil(source_size / chunk_size))
+    # Send the file parts, using FileChunkIO to create a file-like object
+    # that points to a certain byte range within the original file. We
+    # set bytes to never exceed the original file size.
+    for i in range(chunk_count + 1):
+        print('Uploading file chunk: {0} of {1}'.format(i + 1, chunk_count + 1))
+        offset = chunk_size * i
+        bytes = min(chunk_size, source_size - offset)
+        with FileChunkIO(source_path, 'r', offset=offset, bytes=bytes) as fp:
+            mp.upload_part_from_file(fp, part_num=i + 1)
+    # Finish the upload
+    completed_upload = mp.complete_upload()
+    return completed_upload
+
+
+def delete_local_backup_file(filename):
+    print('Deleting file from local filesystem:{0}'.format(filename))
+    os.remove(filename)
+
+
+if __name__ == '__main__':
+    # grab all the arguments
+    arguments = docopt(__doc__, version='mongoHQ_s3_backup 0.0.1')
+    database_name = arguments['--database']
+    account_name = arguments['--account']
+    oauth_token = arguments['--token']
+    bucket = arguments['--bucket']
+    aws_key = arguments['--awskey']
+    aws_secret = arguments['--awssecret']
+    prefix = arguments['--prefix']
+
+    # first, fetch the backup
+    filename = get_backup(database_name, account_name, oauth_token)
+    if not filename:
+        # we failed to save the backup successfully.
+        sys.exit(1)
+    # now, store the file we just downloaded up on S3
+    print('Uploading file to S3. Bucket:{0}'.format(bucket))
+    s3_success = upload_to_s3(prefix + filename, filename, bucket, aws_key, aws_secret)
+    if not s3_success:
+        # somehow failed the file upload
+        print('Failure with S3 upload. Exiting...')
+        sys.exit(1)
+    print('Upload to S3 completed successfully')
+    # Delete the local backup file, to not take up excessive disk space
+    delete_local_backup_file(filename)

+ 4 - 0
S3/new2/Compose-S3-Backup-master/requirements.txt

@@ -0,0 +1,4 @@
+boto==2.32.0
+requests==2.3.0
+docopt==0.6.2
+filechunkio==1.5.0

BIN
S3/new2/HLS-Segmenter-AWS-S3-master.zip


+ 54 - 0
S3/new2/HLS-Segmenter-AWS-S3-master/.gitignore

@@ -0,0 +1,54 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.cache
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/

+ 202 - 0
S3/new2/HLS-Segmenter-AWS-S3-master/LICENSE

@@ -0,0 +1,202 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+

+ 61 - 0
S3/new2/HLS-Segmenter-AWS-S3-master/README.md

@@ -0,0 +1,61 @@
+HLS-Segmenter-AWS-S3
+====================
+
+This script converts MP4 video files from a source folder to HLS streams and uploads the streams to an Amazon S3 bucket.
+
+
+Install requirements
+
+The script needs the boto module for AWS:
+
+#pip install boto
+
+More detail about boto installation:
+http://boto.readthedocs.org/en/latest/getting_started.html
+
+The script also requires ffmpeg on your system.
+For best results, compile the latest version from source.
+
+Instructions are here:
+
+https://trac.ffmpeg.org/wiki/CompilationGuide
+
+or simply install it from your distribution's repositories:
+
+Ubuntu / Debian
+#sudo apt-get install ffmpeg 
+
+CentOS
+#sudo yum install ffmpeg
+
+
+
+
+
+Configuration
+
+In config.json you need to set the AWS credentials,
+the path to the source folder,
+the path to the temp folder,
+the path to ffmpeg,
+and the path to ffprobe.
+
+The config is in JSON format:
+
+{
+"aws":{
+		"access_key":"AWS Access Key",
+		"secret_key":"AWS Secure Key",
+		"s3bucket": "S3 bucket name"},
+"tmp":"/tmp/tmp/",
+"source":"/home/video/",
+"log":"/home/log.log",
+"ffmpeg": "/usr/bin/ffmpeg",
+"ffprobe" : "/usr/bin/ffprobe"
+}
+
+
+Usage
+
+Simply run:
+./hls-segmenter.py

+ 11 - 0
S3/new2/HLS-Segmenter-AWS-S3-master/config.json

@@ -0,0 +1,11 @@
+{
+"aws":{
+		"access_key":"AWS Access Key",
+		"secret_key":"AWS Secure Key",
+		"s3bucket": "S3 bucket name"},
+"tmp":"/tmp/tmp/",
+"source":"/home/video/",
+"log":"/home/log.log",
+"ffmpeg": "/usr/bin/ffmpeg",
+"ffprobe" : "/usr/bin/ffprobe"
+}

+ 106 - 0
S3/new2/HLS-Segmenter-AWS-S3-master/hls-segmenter.py

@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+#  upload to AWS S3 and clean up
+
+# author Roman Sereda
+# sereda.roman@gmail.com
+#
+# install dependencies
+
+# sudo pip install boto
+
+
+import json
+import os.path
+import logging
+import subprocess
+from boto.s3.connection import S3Connection
+from boto.s3.key import Key
+
+config_file = 'config.json'
+json_data = open(config_file)
+config = json.load(json_data)
+json_data.close()
+
+logging.basicConfig(filename=config['log'], level=logging.DEBUG)
+
+
+###########################################################################
+def s3_file_upload(config, filename, keyname):
+    conn = S3Connection(config['aws']["access_key"], config['aws']["secret_key"])
+    mybucket = conn.get_bucket(config['aws']["s3bucket"])  # select bucket
+    k = Key(mybucket)  # select key
+    k.key = keyname  # name the new key
+    k.set_contents_from_filename(filename)  # upload the file under that key
+    k.set_acl('public-read')  # set public-read access
+    keylist = mybucket.list()  # get list of files
+    result = False
+    ss = []
+    for key in keylist:
+        ss.append(key.name)
+    if any(keyname in s for s in ss):
+        logging.debug('s3_file_upload ' + 'Upload ' + keyname + ' completed')
+        result = True
+    rs = conn.close()
+    return result
+
+
+def isVideo(ffprobe, filename):
+    if os.path.isfile(filename):
+        command = [ffprobe, "-v", "quiet", "-print_format", "json", "-show_format", "-show_streams", filename]
+        process = subprocess.Popen(command, stdout=subprocess.PIPE)
+        out, err = process.communicate()
+        video_stat = json.loads(out)
+        stat = []
+        print(video_stat)
+        if len(video_stat) != 0:
+            if 'streams' in video_stat:
+                logging.debug('isVideo ' + 'tested ' + filename)
+                if len(video_stat['streams']) >= 2:
+                    logging.debug('isVideo ' + 'tested: this is a video ' + filename)
+                    return video_stat
+        return False
+
+
+def s3_get_key_list(config):
+    conn = S3Connection(config["aws"]["access_key"], config["aws"]["secret_key"])
+    mybucket = conn.get_bucket(config["aws"]["s3bucket"])
+    key_list = []
+    for key in mybucket.list():
+        key_list.append(key.name)
+    rs = conn.close()
+    return key_list
+
+
+def video_segmenter(ffmpeg, filepath, folder, stream_name):
+    if os.path.isfile(filepath):
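+        # Build the ffmpeg command: encode video with libx264 and audio with libfdk_aac,
+        # copy subtitle streams, and split the output into ~10 second MPEG-TS segments
+        # (out00000.ts, out00001.ts, ...) plus an HLS playlist (playlist.m3u8) in the
+        # destination folder.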
+        command = [ffmpeg, "-re", "-i", filepath, "-map", "0", "-codec:v", "libx264", "-codec:a", "libfdk_aac",
+                   "-codec:s", "copy", "-flags", "-global_header", "-f", "segment", "-segment_list",
+                   folder + "playlist.m3u8", "-segment_time", "10", "-segment_format", "mpegts", folder + "out%05d.ts"]
+        process = subprocess.Popen(command, stdout=subprocess.PIPE)
+        out, err = process.communicate()
+        print(out)
+
+
+def main(config):
+    filelist = sorted(os.listdir(config["source"]))
+    print(filelist)
+    for filename in filelist:
+        if isVideo(config["ffprobe"], config["source"] + filename):
+            if filename.find('.') != -1:
+                stream_name = filename.split('.', 1)[0]
+            else:
+                stream_name = filename
+            video_segmenter(config["ffmpeg"], config["source"] + filename, config["tmp"], stream_name)
+            upload_list = sorted(os.listdir(config["tmp"]))
+            if "playlist.m3u8" in upload_list and len(upload_list) > 2:
+                for ufile in upload_list:
+                    logging.debug('main ' + 'processed ' + " " + ufile)
+                    if s3_file_upload(config, config["tmp"] + ufile, stream_name + "/" + ufile):
+                        logging.debug('main ' + "Upload " + stream_name + "/" + ufile + "################")
+                        os.remove(config["tmp"] + ufile)
+
+    print(s3_get_key_list(config))
+
+
+if __name__ == "__main__":
+    main(config)

BIN
S3/new2/HerokuPostgres-S3-Backup-master.zip


+ 61 - 0
S3/new2/HerokuPostgres-S3-Backup-master/.gitignore

@@ -0,0 +1,61 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+bin/
+include/
+*.egg-info/
+.installed.cfg
+*.egg
+*.tar.gz
+
+# PyInstaller
+#  Usually these files are written by a python script from a template 
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.cache
+nosetests.xml
+coverage.xml
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+#IDE
+.idea/
+*.swp
+

+ 21 - 0
S3/new2/HerokuPostgres-S3-Backup-master/LICENSE

@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Fluke Networks
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 37 - 0
S3/new2/HerokuPostgres-S3-Backup-master/README.md

@@ -0,0 +1,37 @@
+HerokuPostgres-S3-Backup
+=================
+
+*Early Stage Software - unstable and unrefined... for now!*
+
+Download a Heroku Postgres backup and upload to Amazon S3
+
+## Usage
+Install Requirements
+
+```bash
+$ pip install -r requirements.txt
+
+```
+
+Ensure that the Heroku toolbelt is installed and you are logged in.
+```bash
+$ heroku login
+```
+
+View help
+
+```bash
+$ herokupostgres_s3_backup.py --help
+
+```
+
+To get the URL of your pgbackups, the program uses the Heroku Toolbelt.
+```bash
+$ heroku pgbackups:url --app myAppName
+"https://some-pgbackups-url.com/"
+```
+This must be done because the output of pgbackups:url changes as new backups are created.
+The Heroku toolbelt is used because there is no standard API for fetching PG backups.
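+
+A hypothetical invocation (all values below are placeholders) might look like:
+
+```bash
+$ herokupostgres_s3_backup.py -r /usr/local/bin/ -a myAppName \
+    -b my-backup-bucket -k <aws_key_id> -s <aws_secret> -p nightly/
+```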
+
+## Contributing
+Pull requests and suggestions welcome!

+ 108 - 0
S3/new2/HerokuPostgres-S3-Backup-master/herokupostgres_s3_backup.py

@@ -0,0 +1,108 @@
+#! /usr/bin/env python
+"""Heroku Postgres Backup to S3 Utility.
+Uses the Heroku PG-Backups system to pull the latest backup for a database, and put the file on Amazon S3.
+Unfortunately, depends on the heroku toolbelt, since there is no standard API for PGBackups (that we have found).
+Be sure that you are logged in to the heroku toolbelt before you run this script, and that it is in your $PATH.
+
+Usage:
+herokupostgres_s3_backup.py  -r <path_to_heroku> -a <app_name>  -b <bucket> -k <aws_key_id> -s <aws_secret> -p <s3_key_prefix>
+herokupostgres_s3_backup.py (-h | --help)
+
+Options:
+-h --help      Show this screen.
+-a <app_name> --app=<app_name>                     Heroku App name.
+-r <path_to_heroku> --herokupath=<path_to_heroku>  location where the heroku executable lives, needs trailing slash
+-b <bucket> --bucket=<bucket>                      S3 Bucket name
+-k <aws_key_id> --awskey=<aws_key_id>              AWS Key ID
+-s <aws_secret> --awssecret=<aws_secret>           AWS Secret Key
+-p <s3_key_prefix> --prefix=<s3_key_prefix>        Prefixes the filename of the S3 object
+"""
+import requests
+import math
+import os
+import sys
+import datetime
+import subprocess
+from docopt import docopt
+import boto
+from filechunkio import FileChunkIO
+
+
+# Gets the latest backup for a given app
+# Relies on the heroku cli toolbelt to talk to PGBackups
+def get_backup(heroku_path, app_name):
+    # first, get the heroku pgbackups:url from the heroku toolbelt
+    print('Looking up backup URL for:{0}'.format(app_name))
+    # 'Shelling out' isn't ideal in this situation, but it is the path of least resistance for now.
+    backup_url = subprocess.check_output(heroku_path + 'heroku pgbackups:url --app {0}'.format(app_name),
+                                         shell=True).rstrip()
+    # download the file to disk. Stream, since the file could potentially be large
+    print('Downloading backup from:{0}'.format(backup_url))
+    # We need to timestamp our own, since the backup url just gets the 'latest'
+    backup_filename = app_name + '-' + datetime.datetime.now().isoformat()
+    r = requests.get(backup_url, stream=True)
+    with open(backup_filename, 'wb') as f:
+        for chunk in r.iter_content(chunk_size=1024):
+            if chunk:  # filter out keep-alive new chunks
+                f.write(chunk)
+                f.flush()
+    print('saved backup to file: {0}'.format(backup_filename))
+    return backup_filename
+
+
+# Using S3 Multipart upload to handle potentially large files
+def upload_to_s3(s3key, filename, bucket, aws_key, aws_secret):
+    conn = boto.connect_s3(aws_key, aws_secret)
+    bucket = conn.get_bucket(bucket)
+    # Get file info
+    source_path = filename
+    source_size = os.stat(source_path).st_size
+    # Create a multipart upload request
+    mp = bucket.initiate_multipart_upload(s3key)
+    # Use a chunk size of 50 MiB
+    chunk_size = 52428800
+    chunk_count = int(math.ceil(source_size / float(chunk_size)))
+    # Send the file parts, using FileChunkIO to create a file-like object
+    # that points to a certain byte range within the original file. We
+    # set bytes to never exceed the original file size.
+    for i in range(chunk_count):
+        print('Uploading file chunk: {0} of {1}'.format(i + 1, chunk_count))
+        offset = chunk_size * i
+        bytes = min(chunk_size, source_size - offset)
+        with FileChunkIO(source_path, 'r', offset=offset, bytes=bytes) as fp:
+            mp.upload_part_from_file(fp, part_num=i + 1)
+    # Finish the upload
+    completed_upload = mp.complete_upload()
+    return completed_upload
+
+
+def delete_local_backup_file(filename):
+    print('Deleting file from local filesystem:{0}'.format(filename))
+    os.remove(filename)
+
+
+if __name__ == '__main__':
+    # grab all the arguments
+    arguments = docopt(__doc__, version='herokupostgres_s3_backup 0.0.1')
+    app_name = arguments['--app']
+    heroku_path = arguments['--herokupath']
+    bucket = arguments['--bucket']
+    aws_key = arguments['--awskey']
+    aws_secret = arguments['--awssecret']
+    prefix = arguments['--prefix']
+
+    # first, fetch the backup
+    filename = get_backup(heroku_path, app_name)
+    if not filename:
+        # we failed to save the backup successfully.
+        sys.exit(1)
+    # now, store the file we just downloaded up on S3
+    print('Uploading file to S3. Bucket:{0}'.format(bucket))
+    s3_success = upload_to_s3(prefix + filename, filename, bucket, aws_key, aws_secret)
+    if not s3_success:
+        # somehow failed the file upload
+        print('Failure with S3 upload. Exiting...')
+        sys.exit(1)
+    print('Upload to S3 completed successfully')
+    # Delete the local backup file, to not take up excessive disk space
+    delete_local_backup_file(filename)

+ 4 - 0
S3/new2/HerokuPostgres-S3-Backup-master/requirements.txt

@@ -0,0 +1,4 @@
+boto==2.32.0
+requests==2.3.0
+docopt==0.6.2
+filechunkio==1.5.0

BIN
S3/new2/ImageUploader-master.zip


+ 3 - 0
S3/new2/ImageUploader-master/.gitignore

@@ -0,0 +1,3 @@
+.DS_Store
+Thumbs.db
+*.pyc

+ 7 - 0
S3/new2/ImageUploader-master/README.md

@@ -0,0 +1,7 @@
+# ImageUploader
+
+## Description
+Create thumbnails from image files and upload to Amazon S3.
+
+This project moved to ImageUploader-for-Python3.
+https://github.com/Kyohei-Inuzuka/ImageUploader-for-Python3

+ 79 - 0
S3/new2/ImageUploader-master/src/ImageUploader.py

@@ -0,0 +1,79 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+import os
+import imghdr
+from PIL import Image
+import boto3
+from botocore.exceptions import ClientError, ParamValidationError
+from bootstrap import CONFIG
+
+
+class ImageUploader(object):
+    """ ImageUploader """
+
+    def __init__(self):
+        pass
+
+    @staticmethod
+    def resize_image(filepath):
+        """ Create a thumbnail """
+        path, ext = os.path.splitext(os.path.basename(filepath))
+        out_file = path + CONFIG['THUMBNAIL_PREFIX'] + ext
+        thumb_dir = CONFIG['THUMBNAIL_FILE_ROOT']
+        thumb_width = CONFIG['THUMBNAIL_WIDTH']
+
+        try:
+            if os.path.exists(thumb_dir) is False:
+                os.makedirs(thumb_dir)
+            img = Image.open(filepath)
+            wpercent = thumb_width / float(img.size[0])
+            hsize = int((float(img.size[1]) * float(wpercent)))
+
+            img.resize((thumb_width, hsize)).save(thumb_dir + out_file)
+        except IOError as err:
+            print('IOError: ' + err.message)
+
+    @staticmethod
+    def get_image_list():
+        """ Get a list of image files under the directory """
+        file_list = []
+        for (root, dirs, files) in os.walk(CONFIG['IMAGE_FILE_ROOT']):
+            for f_item in files:
+                target = os.path.join(root, f_item).replace("\\", "/")
+                if os.path.isfile(target):
+                    if imghdr.what(target) is not None:
+                        file_list.append(target)
+        return file_list
+
+    @staticmethod
+    def upload_to_s3():
+        """ Upload all files under the S3 directory in one batch """
+        s3_root = CONFIG['S3_ROOT']
+        session = boto3.session.Session(aws_access_key_id=CONFIG['AWS_ACCESS_KEY_ID'],
+                                        aws_secret_access_key=CONFIG['AWS_SECRET_ACCESS_KEY'],
+                                        region_name=CONFIG['S3_REGION'])
+        client = session.resource('s3').meta.client
+
+        for (root, dirs, files) in os.walk(s3_root):
+            for f_item in files:
+                filename = os.path.join(root, f_item).replace("\\", "/")
+                try:
+                    client.upload_file(filename, CONFIG['S3_BUCKETNAME'], filename.replace(s3_root, ""))
+                except ParamValidationError as err:
+                    print("ParamValidationError : %s" % err)
+                except ClientError as err:
+                    if err.response['Error']['Code'] == 'EntityAlreadyExists':
+                        print("User already exists")
+                    else:
+                        print("Unexpected error: %s" % err)
+                except IOError as err:
+                    print('IOError: ' + err.message)
+
+
+if __name__ == '__main__':
+    IU = ImageUploader()
+    IMGFILELIST = IU.get_image_list()
+    for item in IMGFILELIST:
+        IU.resize_image(item)
+    IU.upload_to_s3()

+ 15 - 0
S3/new2/ImageUploader-master/src/bootstrap.py

@@ -0,0 +1,15 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+CONFIG = {
+    'S3_ROOT': './s3/',
+    'IMAGE_FILE_ROOT': './images',
+    'THUMBNAIL_FILE_ROOT': './s3/images/thumbnail/',
+    'THUMBNAIL_PREFIX': '_thumb',
+    'THUMBNAIL_WIDTH': 50,
+
+    'AWS_ACCESS_KEY_ID': 'Input AWS access key ID.',
+    'AWS_SECRET_ACCESS_KEY': 'Input AWS secret access key.',
+    'S3_REGION': 'Input S3 region.',
+    'S3_BUCKETNAME': 'Input S3 bucket name.'
+}

+ 0 - 0
S3/new2/ImageUploader-master/src/images/empty


+ 0 - 0
S3/new2/ImageUploader-master/src/s3/empty


BIN
S3/new2/S3-main.zip


+ 20 - 0
S3/new2/S3-main/.gitignore

@@ -0,0 +1,20 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Pycharm project settings
+.idea/
+
+# Sensitive
+config.ini
+rootkey.csv

+ 10 - 0
S3/new2/S3-main/requirements.txt

@@ -0,0 +1,10 @@
+boto3==1.16.35
+botocore==1.19.35
+coloredlogs==15.0
+humanfriendly==9.1
+jmespath==0.10.0
+pyreadline==2.1
+python-dateutil==2.8.1
+s3transfer==0.3.3
+six==1.15.0
+urllib3==1.26.2

+ 127 - 0
S3/new2/S3-main/s3_upload.py

@@ -0,0 +1,127 @@
+# ----------------------------------------------------------------------------------
+# Imports
+# ----------------------------------------------------------------------------------
+import boto3
+from boto3.s3.transfer import TransferConfig
+from botocore.exceptions import NoCredentialsError
+import coloredlogs
+import configparser
+import logging
+import os
+import sys
+import threading
+
+# ----------------------------------------------------------------------------------
+# Types
+# ----------------------------------------------------------------------------------
+class S3Api:
+    def __init__(self):
+        # Configuration
+        config = configparser.ConfigParser()
+        path_to_config = os.path.dirname(os.path.realpath(__file__))
+        config.read(os.path.join(path_to_config, 'config.ini'))
+
+        self.uploads = dict()
+        parse_config = True
+        index = 0
+        while parse_config:
+            bucket_key = 'bucket{}'.format(index)
+            file_key = 'files{}'.format(index)
+
+            if bucket_key in config['Uploads'] and file_key in config['Uploads']:
+                self.uploads[config['Uploads'][bucket_key]] = config['Uploads'][file_key]
+            else:
+                parse_config = False
+
+            index += 1
+
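+        # A hypothetical config.ini layout this loop expects (all values are placeholders):
+        #
+        #   [AWS]
+        #   access = <aws_access_key_id>
+        #   secret = <aws_secret_access_key>
+        #
+        #   [Uploads]
+        #   bucket0 = my-first-bucket
+        #   files0 = /data/report.csv, /data/archive.zip
+        #   bucket1 = my-second-bucket
+        #   files1 = /logs/app.log
+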
+        # Grab keys
+        self._access_key = config['AWS']['access']
+        self._secret_key = config['AWS']['secret']
+
+        # Initialize client
+        self.s3 = boto3.client('s3',
+                               aws_access_key_id=self._access_key,
+                               aws_secret_access_key=self._secret_key)
+
+        # Initialize resource
+        self.s3_resource = boto3.resource('s3',
+                                          aws_access_key_id=self._access_key,
+                                          aws_secret_access_key=self._secret_key)
+
+    # ------------------------------------------------------------------------------
+    def upload_files(self):
+        for bucket in self.uploads:
+            files = [file.strip() for file in self.uploads[bucket].split(',')]
+            for file in files:
+                self._upload_file_to_s3(file, bucket)
+
+    # ------------------------------------------------------------------------------
+    def _upload_file_to_s3(self, local_file, bucket, s3_file=None):
+        if not s3_file:
+            s3_file = os.path.basename(local_file)
+
+        if os.path.getsize(local_file) < 1073741824:  # File size is less than a gig
+            try:
+                self.s3.upload_file(local_file, bucket, s3_file)
+                logger.info("\nUploaded {} to S3".format(local_file))
+            except FileNotFoundError:
+                logger.error("\nThe file {} was not found".format(local_file))
+            except NoCredentialsError:
+                logger.error("\nCredentials not available")
+        else:
+            try:
+                self._multipart_upload(local_file, bucket, s3_file)
+                logger.info("\nUploaded {} to S3".format(local_file))
+            except FileNotFoundError:
+                logger.error("\nThe file {} was not found".format(local_file))
+            except NoCredentialsError:
+                logger.error("\nCredentials not available")
+
+    # ------------------------------------------------------------------------------
+    def _multipart_upload(self, local_file, bucket, s3_file):
+        transfer_config = TransferConfig(multipart_threshold=1024 * 25,
+                                         multipart_chunksize=1024 * 25,
+                                         use_threads=False)
+        self.s3_resource.Object(bucket, s3_file).upload_file(local_file,
+                                                             Config=transfer_config,
+                                                             Callback=ProgressPercentage(local_file))
+
+class ProgressPercentage(object):
+    def __init__(self, filename):
+        self._filename = filename
+        self._size = float(os.path.getsize(filename))
+        self._seen_so_far = 0
+        self._lock = threading.Lock()
+
+    def __call__(self, bytes_amount):
+        # To simplify we'll assume this is hooked up
+        # to a single filename.
+        with self._lock:
+            self._seen_so_far += bytes_amount
+            percentage = (self._seen_so_far / self._size) * 100
+            sys.stdout.write(
+                "\r%s  %s / %s  (%.2f%%)" % (
+                    self._filename, self._seen_so_far, self._size,
+                    percentage))
+            sys.stdout.flush()
+
+# ----------------------------------------------------------------------------------
+# Globals
+# ----------------------------------------------------------------------------------
+# Initialize logging
+coloredlogs.DEFAULT_LEVEL_STYLES['debug'] = {}
+coloredlogs.DEFAULT_LEVEL_STYLES['info'] = {'color': 'green'}
+logger = logging.getLogger(__name__)
+coloredlogs.install(level='INFO')
+
+# ----------------------------------------------------------------------------------
+# Functions
+# ----------------------------------------------------------------------------------
+
+# ----------------------------------------------------------------------------------
+# Main
+# ----------------------------------------------------------------------------------
+if __name__ == '__main__':
+    api = S3Api()
+    api.upload_files()

BIN
S3/new2/S3BulkUploader-main.zip


+ 132 - 0
S3/new2/S3BulkUploader-main/.gitignore

@@ -0,0 +1,132 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+pip-wheel-metadata/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+.python-version
+
+# pipenv
+#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+#   However, in case of collaboration, if having platform-specific dependencies or dependencies
+#   having no cross-platform support, pipenv may install dependencies that don't work, or not
+#   install all needed dependencies.
+#Pipfile.lock
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+
+.idea/

+ 10 - 0
S3/new2/S3BulkUploader-main/Dockerfile

@@ -0,0 +1,10 @@
+FROM python:3.10
+
+COPY . /usr/src/
+
+WORKDIR /usr/src/
+
+RUN pip install --upgrade pip && \
+    pip install -r requirements.txt
+
+ENTRYPOINT ["python", "s3upload.py"]

+ 2 - 0
S3/new2/S3BulkUploader-main/README.md

@@ -0,0 +1,2 @@
+# S3BulkUploader
+Bulk upload files to Amazon S3
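+
+A minimal invocation sketch, assuming a `.env` file containing `AWS_KEY` and `AWS_SECRET`
+in the working directory (bucket name and paths are placeholders):
+
+```bash
+$ python s3upload.py ./photos my-bucket us-east-1 --base_path backups/photos --threads 10
+```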

+ 7 - 0
S3/new2/S3BulkUploader-main/docker-compose.yaml

@@ -0,0 +1,7 @@
+services:
+  uploader:
+    image: python:3.10
+    volumes:
+      - ./:/usr/src/
+    tty: true
+    stdin_open: true

+ 8 - 0
S3/new2/S3BulkUploader-main/requirements.txt

@@ -0,0 +1,8 @@
+boto3==1.20.54
+botocore==1.23.54
+jmespath==0.10.0
+python-dateutil==2.8.2
+python-dotenv==0.19.2
+s3transfer==0.5.1
+six==1.16.0
+urllib3==1.26.8

+ 135 - 0
S3/new2/S3BulkUploader-main/s3upload.py

@@ -0,0 +1,135 @@
+import os
+import argparse
+import time
+import boto3
+from dotenv import load_dotenv
+from botocore.exceptions import ClientError
+from queue import Queue
+from threading import Thread, Event
+
+
+class S3UploaderException(Exception):
+    pass
+
+
+def get_client(region):
+    return boto3.client(
+        's3',
+        aws_access_key_id=os.getenv('AWS_KEY'),
+        aws_secret_access_key=os.getenv('AWS_SECRET'),
+        region_name=region
+    )
+
+
+def get_queue(directory: str, base_path):
+    queue = Queue()
+    for dir_path, dir_names, filenames in os.walk(directory):
+        for filename in filenames:
+            object_key = os.path.join(
+                dir_path.replace(directory, base_path, 1) if base_path else dir_path,
+                filename
+            ).replace(os.sep, '/')
+            filepath = os.path.join(dir_path, filename)
+            queue.put((filepath, object_key))
+            print('discovered {} files'.format(queue.qsize()), end='\r')
+    return queue
+
+
+def put_to_s3(run_event: Event, client, queue: Queue, bucket, acl, remove_files):
+    while not queue.empty() and run_event.is_set():
+        filepath, object_key = queue.get()
+        try:
+            client.upload_file(
+                filepath, bucket, object_key,
+                ExtraArgs={'ACL': acl}
+            )
+        except ClientError as e:
+            print('Error occurred while uploading: {}'.format(str(e)))
+            continue
+        if remove_files:
+            os.remove(filepath)
+        print('uploaded: {}\nkey: {}\n{}\n'.format(
+            filepath,
+            object_key,
+            'removed: {}'.format(filepath) if remove_files else ''
+        ))
+
+
+def generate_threads(
+        run_event: Event,
+        directory: str,
+        bucket,
+        region,
+        acl,
+        remove_files,
+        base_path,
+        thread_no
+):
+    client = get_client(region)
+    queue = get_queue(directory, base_path)
+    threads = []
+    for i in range(thread_no):
+        threads.append(Thread(
+            target=put_to_s3,
+            args=(run_event, client, queue, bucket, acl, remove_files)
+        ))
+    return threads
+
+
+def start_threads(threads):
+    for thread in threads:
+        thread.start()
+
+
+def has_live_threads(threads):
+    return True in [t.is_alive() for t in threads]
+
+
+def main():
+    start_time = time.time()
+    parser = argparse.ArgumentParser()
+    run_event = Event()
+    run_event.set()
+    parser.add_argument('directory', help='Directory to upload')
+    parser.add_argument('bucket', help='AWS S3 bucket name')
+    parser.add_argument('region', help='AWS S3 region')
+    parser.add_argument('--env_file', help='Env file with AWS_KEY and AWS_SECRET', default='.env')
+    parser.add_argument('--acl', help='ACL Policy to be applied', default='public-read')
+    parser.add_argument('--base_path', help='Base path name for object key')
+    parser.add_argument('--remove_files', action='store_true', help='Delete files after uploading', default=False)
+    parser.add_argument('--threads', help="No. of threads", default=5, type=int)
+    args = parser.parse_args()
+    try:
+        if not os.path.isdir(args.directory):
+            raise S3UploaderException('Directory \'{}\' does not exist'.format(args.directory))
+        if not os.path.isfile(args.env_file):
+            raise S3UploaderException('Env file {} does not exist'.format(args.env_file))
+        if args.threads < 1:
+            raise S3UploaderException('At least one thread is required')
+        load_dotenv(args.env_file)
+        threads = generate_threads(
+            run_event,
+            args.directory,
+            args.bucket,
+            args.region,
+            args.acl,
+            args.remove_files,
+            args.base_path,
+            args.threads
+        )
+        start_threads(threads)
+        while has_live_threads(threads):
+            try:
+                [t.join(1) for t in threads
+                 if t is not None and t.is_alive()]
+            except KeyboardInterrupt:
+                print('Please wait! gracefully stopping...')
+                run_event.clear()
+    except S3UploaderException as e:
+        print('Error: ', str(e))
+
+    print("--- %s seconds ---" % (time.time() - start_time))
+
+
+if __name__ == "__main__":
+    main()

BIN
S3/new2/Upload-Data-to-Amazon-S3-master.zip


+ 202 - 0
S3/new2/Upload-Data-to-Amazon-S3-master/LICENSE

@@ -0,0 +1,202 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+

+ 37 - 0
S3/new2/Upload-Data-to-Amazon-S3-master/README.md

@@ -0,0 +1,37 @@
+# Upload-Data-to-Amazon-S3
+Upload data from a Linux server to Amazon S3 or S3-compatible services
+
+Usage
+-------
+```python
+connection = S3Connection(
+     host = 's3.amazonaws.com', # S3 Compatible Services
+     is_secure = True,
+     aws_access_key_id = 'access_key_id',  # Add your access key
+     aws_secret_access_key = 'secret_access_key' # Add your secret key
+)
+bucket = connection.get_bucket('bucket_name', validate = True)
+COMMON_PATH = '/common_folder/' # COMMON PATH OF YOUR S3 AND YOUR SERVER
+```
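+
+As a hypothetical example of how `migrate.py` builds the key, everything after
+`COMMON_PATH` in the local path is kept and prefixed with `ROOT_PATH`:
+
+```python
+path_file = '/var/www/common_folder/img/logo.png'  # placeholder local path
+key_name = ROOT_PATH + path_file.rsplit(COMMON_PATH, 1)[1]  # -> ROOT_PATH + 'img/logo.png'
+```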
+
+License
+-------
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+
+

+ 36 - 0
S3/new2/Upload-Data-to-Amazon-S3-master/migrate.py

@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+import os
+from boto.s3.connection import S3Connection
+from boto.s3.key import Key
+
+connection = S3Connection(
+     host = 's3.amazonaws.com', # S3 Compatible Services
+     is_secure = True,
+     aws_access_key_id = 'access_key_id',  # Add your access key
+     aws_secret_access_key = 'secret_access_key' # Add your secret key
+)
+
+bucket = connection.get_bucket('bucket_name', validate = True)
+COMMON_PATH = '/common_folder/' # COMMON PATH OF YOUR S3 AND YOUR SERVER
+ROOT_PATH = '' # S3 key prefix prepended to the uploaded path
+BASE = os.path.dirname(os.path.abspath(__file__))
+
+
+def upload(path, filename):
+    path_file = os.path.normpath(os.path.join(BASE, path, filename))
+    if COMMON_PATH in path_file:
+        path_upload = ROOT_PATH + path_file.rsplit(COMMON_PATH, 1)[1]
+        print ('  Upload to : %s' % path_upload)
+        key = Key(bucket, path_upload)
+        key.set_contents_from_filename(path_file)
+    else:
+        print ('  Upload path not found.')
+
+
+if __name__ == '__main__':
+    count = 1
+    for path, subdirs, files in os.walk('.'):
+        for name in files:
+            if name != os.path.basename(__file__):
+                print ('> Execute File (%s/%s) : %s '% (count, len(files)-1, os.path.join(path, name)[1:]))
+                upload(path, name)
+                count += 1

BIN
S3/new2/Vaporfile-master.zip


+ 4 - 0
S3/new2/Vaporfile-master/.gitignore

@@ -0,0 +1,4 @@
+*.pyc
+*~
+/build/*
+/Vaporfile.egg-info/*

+ 117 - 0
S3/new2/Vaporfile-master/README.markdown

@@ -0,0 +1,117 @@
+Vaporfile
+==========
+
+Vaporfile is a tool to upload and synchronize static websites to the
+[Amazon S3](http://aws.amazon.com/s3/) cloud.
+
+*WARNING*: This is alpha quality software, version 0.0.1. It works for
+me, but it may destroy your life. Be careful, test this out with
+non-important data first. I doubt I've done anything royally stupid
+here, but I guess it's technically possible that some unforeseen bug could delete
+all of the buckets configured in your S3 account. YOU'VE BEEN WARNED.
+
+Requirements
+------------
+* [Python 2.6](http://www.python.org/download/) or above.
+* An [Amazon S3 account](http://aws.amazon.com/s3/).
+* The DEV version of [boto](https://github.com/boto/boto) (until 2.0b5
+or greater is released).
+
+Install
+-------
+
+As of Feb 19 2011, the new [S3 website
+extensions](http://aws.typepad.com/aws/2011/02/host-your-static-website-on-amazon-s3.html)
+are one day old; needless to say, it's a bit bleeding edge. Vaporfile
+depends on the best Python bindings for Amazon S3:
+[boto](https://github.com/boto/boto). The boto devs are really on top
+of their game and have already implemented the website features, but
+they haven't made it into a released version yet. For now, you'll
+need to install the dev version:
+
+     git clone https://github.com/boto/boto.git
+     cd boto && sudo python setup.py develop
+
+Now you can install the DEV version of Vaporfile:
+
+    git clone https://github.com/EnigmaCurry/Vaporfile.git
+    cd Vaporfile && sudo python setup.py develop
+
+Or, you can install the packaged version on PyPI (Not there yet):
+
+    sudo easy_install vaporfile
+    
+Uploading a website
+-------------------
+
+Once Vaporfile is installed, you can run it to create a new website
+configuration:
+
+        vaporfile create
+
+This will run a configuration wizard that will get you started. It
+just asks you a few questions to get set up:
+
+* Asks you for your [Amazon AWS
+  credentials](https://aws-portal.amazon.com/gp/aws/developer/account/index.html?action=access-key).
+* Configures your domain name / Amazon S3 bucket name.
+* Configures the path of the website on your local drive.
+* Configures the index page of your site (e.g. index.html).
+* Configures the 404 error page of your site.
+* Creates the actual bucket on S3 and enables the website endpoint.
+
+It saves all this configuration information in `~/.vaporfile`,
+which includes your AWS credentials in plain text. The file is marked as readable
+only by your user account, so this should be reasonably safe on
+machines you control/trust.
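+
+For reference, the saved file is plain JSON; a configuration with one website
+looks roughly like this (the values are placeholders):
+
+    {
+        "credentials": {
+            "access_key": "YOUR_ACCESS_KEY",
+            "secret_key": "YOUR_SECRET_KEY"
+        },
+        "websites": {
+            "www.yourdomain.com": {
+                "bucketname": "www.yourdomain.com",
+                "error_doc": "404.html",
+                "index": "index.html",
+                "localpath": "/home/you/website"
+            }
+        }
+    }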
+
+Once you've created the site, you can upload it:
+
+     vaporfile -v upload [name-of-website]
+
+With the -v flag on, you'll see all the files it's uploading;
+otherwise it should complete silently.
+
+Now make any changes you wish to your site locally, and run the upload
+again. Files that have changed will get re-uploaded, files that have
+been deleted locally will get deleted from S3 (unless you specify --no-delete).
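+
+For example, to re-synchronize while keeping remote files that were deleted
+locally:
+
+    vaporfile -v upload www.yourdomain.com --no-delete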
+
+Deployment
+----------
+
+Vaporfile will upload your site to Amazon, but you still need to
+configure your domain to point to it.
+
+The name you chose for your website when running `vaporfile create`
+is the bucket name created on S3. S3 creates a domain like this:
+
+    www.yourdomain.com.s3-website-us-east-1.amazonaws.com
+
+Assuming you don't like that domain name, you'll probably want to
+point your own domain name to that location. You do that with a CNAME
+configured with your DNS provider -- create a CNAME record for
+`www.yourdomain.com` and point it to `s3-website-us-east-1.amazonaws.com`.
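+
+In BIND-style zone notation (your DNS provider's interface may differ), the
+record would look something like:
+
+    www.yourdomain.com.  IN  CNAME  s3-website-us-east-1.amazonaws.com.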
+
+Usage
+-----
+
+You can see the rest of the usage by running `vaporfile -h`, but here
+it is:
+
+    usage: vaporfile [-h] [--version] [-c PATH] [-v] [-vv]
+                     {credentials,create,list,upload} ...
+    
+    positional arguments:
+      {credentials,create,list,upload}
+        credentials         Manage Amazon AWS credentials
+        create              Create a new S3 website
+        upload              Upload a previously configured website
+        list                List all configured websites
+    
+    optional arguments:
+      -h, --help            show this help message and exit
+      --version
+      -c PATH, --config PATH
+                            Use alternative config file (defaults to ~/.vaporfile)
+      -v, --verbose         Be verbose
+      -vv, --veryverbose    Be extra verbose

+ 20 - 0
S3/new2/Vaporfile-master/setup.py

@@ -0,0 +1,20 @@
+from setuptools import setup, find_packages
+import os
+import glob
+
+import vaporfile
+            
+setup(name='Vaporfile',
+      version=vaporfile.__version__,
+      description='A tool to upload static websites to the Amazon S3 cloud',
+      author='Ryan McGuire',
+      author_email='ryan@enigmacurry.com',
+      url='http://www.enigmacurry.com',
+      license='MIT',
+      packages=["vaporfile"],
+      install_requires =["boto>=2.0b4"],
+      entry_points="""
+      [console_scripts]
+      vaporfile = vaporfile.main:main
+      """
+      )

+ 1 - 0
S3/new2/Vaporfile-master/vaporfile/__init__.py

@@ -0,0 +1 @@
+__version__ = "0.0.1"

+ 40 - 0
S3/new2/Vaporfile-master/vaporfile/config.py

@@ -0,0 +1,40 @@
+import os
+import util
+import json
+
+from website import S3Website
+
+__config_file__ = os.path.join(os.path.expanduser("~"), ".vaporfile")
+
+
+def load_config(path=None):
+    if not path:
+        path = __config_file__
+    with open(path) as f:
+        config = json.loads(f.read())
+        try:
+            config["websites"]
+        except KeyError:
+            config["websites"] = {}
+    return config
+
+
+def get_config(path=None):
+    """Get the config, load it if possible, create it if necessary"""
+    try:
+        c = load_config(path)
+    except IOError:
+        # No existing config, create a new one.
+        c = {}
+    return c
+
+
+def save_config(config, path=None):
+    if not path:
+        path = __config_file__
+    # JSON is a better pickle:
+    with open(path, "w") as f:
+        f.write(json.dumps(config, sort_keys=True, indent=4))
+    # Make the file read/write only to the user.
+    os.chmod(path, 0o0600)

+ 56 - 0
S3/new2/Vaporfile-master/vaporfile/credentials.py

@@ -0,0 +1,56 @@
+import sys
+import os.path
+import json
+import base64
+import getpass
+
+import config
+from prompt_util import clear_screen, get_input
+
+class VaporfileCredentialException(Exception):
+    pass
+
+def prompt_save_credentials(args):
+    clear_screen()
+    print(" WARNING WARNING WARNING ".center(80,"+"))
+    print("This will save your Amazon AWS credentials in your home directory.")
+    print("The file ~/.vaporfile will be readable only by your user account.")
+    print("It is still your responsibility to secure your computer from unwanted")
+    print("access. This should be perfectly safe contingent upon proper system security.")
+    print("".center(80,"+"))
+    print("")
+    print("For reference, your amazon credentials can be found at:")
+    print("https://aws-portal.amazon.com/gp/aws/developer/account/index.html?action=access-key")
+    print("")
+    cred = {"access_key":get_input("What is your Access key? : ",accept_blank=False),
+            "secret_key":get_input("What is your Secret key? : ",accept_blank=False)}
+    print("")
+    store_credentials(cred)
+    clear_screen()
+    print("Credentials saved in {0}".format(config.__config_file__))
+    print("File access restricted to your user account ({0}).".format(getpass.getuser()))
+    print("")
+
+def check_credentials(user_config=None):
+    if not user_config:
+        user_config = config.get_config()
+    try:
+        user_config["credentials"]["access_key"]
+        user_config["credentials"]["secret_key"]
+    except KeyError:
+        raise VaporfileCredentialException(
+            "Missing Amazon credentials in config")
+    return True
+    
+def store_credentials(credentials):
+    """Store Amazon AWS credentials in user's home directory"""
+    c = config.get_config()
+    c["credentials"] = credentials
+    config.save_config(c)
+
+def remove_credentials(credentials):
+    """Store Amazon AWS credentials in user's home directory"""
+    c = config.get_config()
+    del c["credentials"]
+    config.save_config(c)
+    

+ 88 - 0
S3/new2/Vaporfile-master/vaporfile/main.py

@@ -0,0 +1,88 @@
+import sys
+import argparse
+import shlex
+import logging
+
+from . import __version__
+import config
+import credentials
+import website
+
+logging.basicConfig(level=logging.WARNING)
+logger = logging.getLogger("vaporfile")
+
+def main():
+    parser_template = argparse.ArgumentParser(add_help=False)
+    parser_template.add_argument(
+        "--version", action="version",
+        version="%(prog)s {0} -- http://github.com/EnigmaCurry/vaporfile"\
+            .format(__version__))
+    parser_template.add_argument(
+        "-c", "--config", metavar="PATH", dest="config",
+        default=None, help="Use alternative config file (defaults"
+        " to ~/.vaporfile)")
+    parser_template.add_argument("-v", "--verbose", dest="verbose",
+                                 default=False, action="store_true",
+                                 help="Be verbose")
+    parser_template.add_argument("-vv", "--veryverbose", dest="veryverbose",
+                                 default=False, action="store_true",
+                                 help="Be extra verbose")
+    parser = argparse.ArgumentParser(parents=[parser_template])
+    
+    subparsers = parser.add_subparsers()
+
+    ### Credentials
+    p_cred = subparsers.add_parser(
+        "credentials", help="Manage Amazon AWS credentials",
+        parents=[parser_template])
+    p_cred_subparsers = p_cred.add_subparsers()
+    p_cred_store = p_cred_subparsers.add_parser(
+        "store", help="Store credentials locally")
+    p_cred_store.set_defaults(func=credentials.prompt_save_credentials)
+    p_cred_remove = p_cred_subparsers.add_parser(
+        "remove", help="Remove the credentials stored locally")
+    p_cred_remove.set_defaults(func=credentials.remove_credentials)
+    
+    
+    ### Create site
+    p_create = subparsers.add_parser(
+        "create", help="Create a new S3 website",
+        parents=[parser_template])
+    p_create.set_defaults(func=website.prompt_create_website)
+
+    ### Upload
+    p_upload = subparsers.add_parser(
+        "upload", help="Upload a previously configured website",
+        parents=[parser_template])
+    p_upload.add_argument("WEBSITE", help="Name of configured website")
+    p_upload.add_argument("--no-delete", action="store_true", help="Don't delete old files from S3")
+    p_upload.set_defaults(func=website.upload_website)
+
+    ### Remove
+    p_remove = subparsers.add_parser(
+        "remove", help="Remove a website configuration locally",
+        parents=[parser_template])
+    p_remove.add_argument("WEBSITE", help="Name of configured website")
+    p_remove.set_defaults(func=website.remove_website)
+
+    ### List
+    p_list = subparsers.add_parser(
+        "list", help="List all configured websites",
+        parents=[parser_template])
+    p_list.set_defaults(func=website.list_websites)
+    
+
+    if len(sys.argv) <= 1:
+        parser.print_help()
+        parser.exit(1)
+    else:
+        args = parser.parse_args()
+    if args.config:
+        config.__config_file__ = args.config
+    if args.verbose:
+        logger.setLevel(logging.INFO)
+        logger.info("Setting verbose output mode")
+    if args.veryverbose:
+        logger.setLevel(logging.DEBUG)
+        logger.debug("Setting very verbose output mode")
+    args.func(args)

+ 39 - 0
S3/new2/Vaporfile-master/vaporfile/prompt_util.py

@@ -0,0 +1,39 @@
+import sys
+import os
+
+def clear_screen():
+    os.system('cls' if os.name=='nt' else 'clear')
+
+def get_input(prompt, accept_blank=True):
+    try:
+        while True:
+            response = raw_input(prompt)
+            if response == "" and accept_blank==False:
+                print("A response is required here.\n")
+            else:
+                return response
+    except KeyboardInterrupt:
+        print("\n\nExiting without saving changes.")
+        sys.exit(1)
+        
+def get_yes_no(prompt, yn_ok=True, default=None):
+    """Ask the user a Yes or No question.
+
+    yn_ok set to True will allow 'y' or 'n' response too.
+    A default may be specified when the user just presses enter."""
+    if not prompt.endswith(" "):
+        prompt += " "
+    while True:
+        response = get_input(prompt).lower()
+        if response == "yes":
+            return True
+        elif response == "y" and yn_ok:
+            return True
+        elif response == "no":
+            return False
+        elif response == "n" and yn_ok:
+            return False
+        elif response == "" and default != None:
+            return default
+        else:
+            print("A Yes or No response is required.\n")

+ 42 - 0
S3/new2/Vaporfile-master/vaporfile/s3_util.py

@@ -0,0 +1,42 @@
+from boto.s3.connection import OrdinaryCallingFormat, S3Connection
+from boto.exception import S3ResponseError
+
+import config
+import credentials
+
+__conn = None
+
+def get_connection():
+    c = config.load_config()
+    global __conn
+    if not __conn:
+        __conn = S3Connection(c["credentials"]["access_key"],
+                              c["credentials"]["secret_key"],
+                              calling_format=OrdinaryCallingFormat())
+    return __conn
+
+def get_bucket_names(conn):
+    l=[]
+    for bucket in conn.get_all_buckets():
+        l.append(bucket.name)
+    return l
+
+def get_paths_from_keys(bucket):
+    paths = {} #path -> key
+    for key in bucket.get_all_keys():
+        paths[key.name] = key
+    return paths
+
+exc = None
+
+def test_bucket_exists(conn, bucket_name):
+    try:
+        bucket = conn.get_bucket(bucket_name)
+        return True
+    except S3ResponseError as e:
+        if e.status == 404:
+            return False
+        elif e.status == 403:
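+            # A 403 means the bucket exists but belongs to another account.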
+            return True
+        else:
+            raise e

+ 44 - 0
S3/new2/Vaporfile-master/vaporfile/util.py

@@ -0,0 +1,44 @@
+import os
+import hashlib
+import json
+import copy
+
+def md5_for_file(path, block_size=2**20):
+    md5 = hashlib.md5()
+    with open(path, 'rb') as f:
+        while True:
+            data = f.read(block_size)
+            if not data:
+                break
+            md5.update(data)
+    return md5.hexdigest()
+
+def mkdir(newdir):
+    """works the way a good mkdir should :)
+    - already exists, silently complete
+    - regular file in the way, raise an exception
+    - parent directory(ies) does not exist, make them as well
+    """
+    if os.path.isdir(newdir):
+        pass
+    elif os.path.isfile(newdir):
+        raise OSError("a file with the same name as the desired " \
+                          "dir, '{0}', already exists.".format(newdir))
+    else:
+        head, tail = os.path.split(newdir)
+        if head and not os.path.isdir(head):
+            mkdir(head)
+        #print "mkdir {0}.format(repr(newdir))
+        if tail:
+            os.mkdir(newdir)
+
+class JSONEncodable(object):
+    """An inheritance mixin to make a class encodable to JSON"""
+    def to_dict(self):
+        d = copy.copy(self.__dict__)
+        for key,value in d.items():
+            if hasattr(value,"to_dict"):
+                d[key] = value.to_dict()
+        return d
+    def to_json(self):
+        return json.dumps(self.to_dict(),sort_keys=True,indent=4)

+ 300 - 0
S3/new2/Vaporfile-master/vaporfile/website.py

@@ -0,0 +1,300 @@
+import sys
+import os, os.path
+import logging
+import readline
+import getpass
+
+from boto.s3.key import Key
+
+import credentials
+import config
+import s3_util
+import util
+from prompt_util import get_yes_no, get_input, clear_screen
+
+logger = logging.getLogger("vaporfile.website")
+
+def prompt_create_website(args):
+    #This is the information we'll gather:
+    user_config = None
+    bucket_name = None
+    using_own_domain = None
+    sync_path = None
+    directory_index = None
+    error_index = None
+    use_existing_bucket = False
+    
+    clear_screen()
+    print(" Amazon S3 Website Creation Wizard ".center(80,"+"))
+    print("This will guide you through the process of creating a new website on Amazon S3.")
+    print("No changes to your S3 account will occur until you accept changes at the end.")
+    print("You may press Ctrl-C at any point to quit without saving.")
+    print("".center(80,"+"))
+    print("")
+    user_config = config.get_config()
+    try:
+        user_config["credentials"]
+    except KeyError:
+        print("No existing account information found.")
+        if get_yes_no("Would you like to setup your Amazon AWS account? [Y/n] : ",default=True):
+            credentials.prompt_save_credentials({})
+        else:
+            print("")
+            print("Cannot proceed without Amazon account information.")
+            sys.exit(1)
+        user_config = config.load_config()
+    conn = s3_util.get_connection()
+    using_own_domain = get_yes_no("Will you be using your own domain name? [y/n] : ")
+    if using_own_domain:
+        print("")
+        print("Amazon S3 websites hosted using your own domain require a CNAME configured")
+        print("with your DNS service. Unfortunately, this means that you cannot use your root")
+        print("domain with S3, you must use a subdomain.")
+        print("")
+        while True:
+            bucket_name = get_input("What is the the fully qualified subdomain you would like to use?\n[eg. www.yourdomain.com] : ",accept_blank=False)
+            print("Checking to see if {0} is available...".format(bucket_name))
+            if not s3_util.test_bucket_exists(conn, bucket_name):
+                break
+            print("")
+            #The bucket already exists.. by the user?
+            if bucket_name in s3_util.get_bucket_names(conn):
+                print("It looks like you've already configured an S3 bucket "
+                      "with that name.\n")
+                print("WARNING: If you proceed, existing files in this "
+                      "bucket may be lost!")
+                if get_yes_no("Are you sure you want to use {0}? [y/n]"\
+                                  .format(bucket_name)):
+                    use_existing_bucket = True
+                    break
+                print("")
+            else:
+                print("Sorry, it looks like someone else owns that bucket name. Contact Amazon support")
+                print("for help if you own the domain you chose. This is an unfortunate side-effect of")
+                print("Amazon choosing a globally flat namespace for buckets.")
+                print("")        
+    else:
+        print("")
+        print("Without using your own domain, you will need to create a unique bucket name.")
+        print("You will only be able to access your website through the bucket address")
+        print("Amazon provides, which is a bit long and cumbersome.")
+        print("Example: yourwebsite.s3-website-us-east-1.amazonaws.com")
+        print("")
+        while True:
+            bucket_name = get_input("What is the bucket name you would like to use?\n[eg. yourwebsite] : ")
+            print("Checking to see if {0} is available...".format(bucket_name))
+            if not s3_util.test_bucket_exists(conn, bucket_name):
+                break
+            print("")
+            print("Sorry, that bucketname is already used. Try another.")
+    clear_screen()
+    print(" Local Path Selection ".center(80,"+"))
+    print("This tool works by synchronizing a local directory to your S3 bucket.")
+    print("Each time you synchronize, this tool will upload new or changed files")
+    print("as well as delete any files no longer found in the local directory.")
+    print("".center(80,"+"))
+    print("")
+    while True:
+        sync_path = get_input("What is the full local directory path you want "
+                              "synchronized?\n[eg. /home/{0}/website ] : ".\
+                                  format(getpass.getuser()), accept_blank=False)
+        if os.path.isdir(sync_path):
+            break
+        elif os.path.isfile(sync_path):
+            print("Sorry, that's not a directory. Please try again.")
+        else:
+            if get_yes_no("This directory does not exist. Would you like to create it? (y/n)"):
+                try:
+                    util.mkdir(sync_path)
+                except OSError:
+                    print("Permission denied. Try again.")
+                else:
+                    break
+        print("")
+    clear_screen()
+    print(" Configuration Options ".center(80,"+"))
+    print("")
+    if get_yes_no("Would you like to use index.html as your default index file? (Y/n)", default=True):
+        directory_index = "index.html"
+    else:
+        while True:
+            print("What file would you like to serve when directories are requested?")
+            directory_index = get_input("[eg. index.html] : ")
+            if directory_index != "":
+                break
+            print("You must enter a directory index. Most users should choose index.html")
+    print("")
+    if get_yes_no("Would you like a special 404 handler when files aren't found? (y/N) : ", default=False):
+        while True:
+            print("What file would you like to serve when files aren't found?")
+            error_index = get_input("[eg. 404.html] : ")
+            if error_index != "":
+                break            
+        print("")
+    clear_screen()
+    print(" Confirmation ".center(80,"+"))
+    print("OK, we've gathered all the necessary information about your new website.")
+    print("Let's review:")
+    print("")
+    if using_own_domain:
+        print("    Your domain name:".ljust(35)+bucket_name)
+    print("    Amazon S3 bucket name:".ljust(35)+bucket_name)
+    print("    Local path to synchronize:".ljust(35)+sync_path)
+    print("    Index file:".ljust(35)+directory_index)
+    if error_index:
+        print("    Error index file:".ljust(35)+error_index)
+    print("")
+    if get_yes_no("Would you like to save this configuration now? [y/n] : "):
+        website = S3Website(
+            bucket_name, sync_path, index=directory_index,
+            error_doc=error_index)
+        user_config["websites"][bucket_name] = website.to_config()
+        website.create(use_existing_bucket=use_existing_bucket)
+        if not use_existing_bucket:
+            print("Amazon S3 bucket created!")
+        config.save_config(user_config)
+        print("Website configuration saved!")
+        website_endpoint = website.get_bucket().get_website_endpoint()
+        s3_endpoint = website_endpoint.replace(bucket_name+".","")
+        print("Your Amazon website endpoint: {0}".format(website_endpoint))
+        if using_own_domain:
+            print("Your DNS service needs a CNAME record pointing {0} "
+                  "to {1}".format(bucket_name, s3_endpoint))
+            print("")
+        print("To upload your website run this command:")
+        print("")
+        print("  vaporfile -v upload {0}".format(bucket_name))
+        print("")
+
+def upload_website(args):
+    user_config = config.get_config()
+    try:
+        website = S3Website.from_config(user_config["websites"][args.WEBSITE])
+    except KeyError:
+        print("")
+        print("Can't find a website configuration called {0}".format(args.WEBSITE))
+        print("Maybe you need to create it first? Run: vaporfile create")
+        return
+    try:
+        credentials.check_credentials(user_config)
+    except credentials.VaporfileCredentialException:
+        print("")
+        print("Can't find credentials. You need to run: vaporfile "
+              "credentials store")
+        print("")
+        return        
+    website.synchronize(delete=not args.no_delete)
+
+def remove_website(args):
+    user_config = config.get_config()
+    try:
+        site = user_config["websites"][args.WEBSITE]
+    except KeyError:
+        print("")
+        print("Unknown website configuration : {0}".format(args.WEBSITE))
+        print("")
+    else:
+        del user_config["websites"][args.WEBSITE]
+        config.save_config(user_config)
+        print("")
+        print("Local website configuration removed : {0}".format(args.WEBSITE))
+        print("")
+        
+    
+def list_websites(args):
+    try:
+        user_config = config.load_config()
+    except IOError:
+        print("")
+        print("Can't find a configuration. You need to run: vaporfile create")
+        print("")
+        return
+    if len(user_config["websites"]) == 0:
+        print("")
+        print("No websites have been created yet. You need to run: vaporfile create")
+        print("")
+        return
+    for name, website in user_config["websites"].items():
+        print(("   "+name).ljust(35)+"- "+website["localpath"])
+        
+                
+class S3Website(object):
+    """Tool for maintaining a static website in S3"""
+    def __init__(self, bucketname, localpath, index="index.html",
+                 error_doc="404.html",**kwargs):
+        self.bucketname = bucketname
+        self.localpath = localpath
+        self.index = index
+        self.error_doc = error_doc
+    def to_config(self):
+        return {"bucketname":self.bucketname,
+                "localpath":self.localpath,
+                "index":self.index,
+                "error_doc":self.error_doc}
+    @classmethod
+    def from_config(cls, config_dict):
+        return cls(**config_dict)
+    def get_bucket(self):
+        return self.get_connection().get_bucket(self.bucketname)
+    def get_connection(self):
+        return s3_util.get_connection()
+    def create(self, use_existing_bucket=False):
+        """Create the bucket for the subdomain."""
+        #Check if the bucket name already exists in our account,
+        #boto doesn't tell us this.
+        connection = self.get_connection()
+        if use_existing_bucket:
+            bucket = connection.get_bucket(self.bucketname)
+        else:
+            if self.bucketname in s3_util.get_bucket_names(connection):
+                raise Exception("Bucket '{0}' already exists in your account."\
+                                    .format(self.bucketname))
+            bucket = connection.create_bucket(self.bucketname)
+            logger.info("Created new bucket : {0}".format(self.bucketname))
+        #A website should be publically readable:
+        bucket.set_acl("public-read")
+        #Turn on website functionality:
+        if self.error_doc:
+            bucket.configure_website(self.index, self.error_doc)
+        else:
+            bucket.configure_website(self.index)
+    def synchronize(self, delete=False):
+        """Synchronize the localpath to S3.
+
+        Upload new or changed files.
+        Delete files that no longer exist locally."""
+        bucket = self.get_bucket()
+        s3_paths = s3_util.get_paths_from_keys(bucket)
+        local_files = set()
+        for dirpath, dirnames, filenames in os.walk(self.localpath):
+            for filename in filenames:
+                file_path = os.path.join(dirpath,filename)
+                file_key = os.path.relpath(file_path,self.localpath)
+                if os.sep == "\\":
+                    #Windows paths need conversion
+                    file_key = file_key.replace("\\","/")
+                local_files.add(file_key)
+                try:
+                    s3_key = s3_paths[file_key]
+                except KeyError:
+                    #File is new
+                    s3_key = bucket.new_key(file_key)
+                    logger.info("Uploading new file: {0}".format(file_key))
+                    s3_key.set_contents_from_filename(file_path)
+                    s3_key.set_acl("public-read")
+                else:
+                    #File already exists, check if it's changed.
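+                    # (For simple, non-multipart uploads the S3 ETag is the MD5
+                    # of the object, so comparing against a local MD5 works.)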
+                    local_md5 = util.md5_for_file(file_path)
+                    if local_md5 != s3_key.etag.replace("\"",""):
+                        #File has changed
+                        logger.info("Uploading changed file: {0}".format(file_key))
+                        s3_key.set_contents_from_filename(file_path)
+                        s3_key.set_acl("public-read")
+        if delete:
+            #Delete all files that don't exist locally
+            for name, key in s3_paths.items():
+                if name not in local_files:
+                    #Delete it.
+                    logger.info("Deleting old file: {0}".format(name))
+                    key.delete()
+                

BIN
S3/new2/amazon_s3_backup-master.zip


+ 38 - 0
S3/new2/amazon_s3_backup-master/Amazon S3 Backup.py

@@ -0,0 +1,38 @@
+import tarfile
+import boto3
+import lzma
+from botocore.exceptions import NoCredentialsError
+from zipfile import ZipFile
+
+aws_access_key = "AWS Key"
+secret_access_key = "AWS Secret Access Key"
+
+zip_file_name = "file_name.tar.xz"
+dirName = "Directory of items to compress"
+
+
+def upload_to_aws(local_file, bucket, s3_file):
+    s3 = boto3.client('s3', aws_access_key_id=aws_access_key,
+                      aws_secret_access_key=secret_access_key)
+
+    try:
+        s3.upload_file(local_file, bucket, s3_file)
+        print("Upload Successful")
+        return True
+    except FileNotFoundError:
+        print("The file was not found")
+        return False
+    except NoCredentialsError:
+        print("Credentials not available")
+        return False
+
+
+if __name__ == '__main__':
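+    # Stream a tar archive of dirName through an LZMA compressor so the
+    # result on disk is a .tar.xz file named zip_file_name.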
+    xz_file = lzma.LZMAFile(zip_file_name, mode='w')
+
+    with tarfile.open(mode='w', fileobj=xz_file) as tar_xz_file:
+        tar_xz_file.add(dirName)
+
+    xz_file.close()
+
+    uploaded = upload_to_aws("local_file.tar.xz", "bucket", "s3_file")

BIN
S3/new2/appengine-s3-upload-master.zip


+ 8 - 0
S3/new2/appengine-s3-upload-master/app.yaml

@@ -0,0 +1,8 @@
+application: my-application
+version: upload
+runtime: python
+api_version: 1
+
+handlers:
+- url: /.*
+  script: upload.py

+ 22 - 0
S3/new2/appengine-s3-upload-master/upload.html

@@ -0,0 +1,22 @@
+<html> 
+  <head>
+    <title>S3 Upload</title> 
+    <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+  </head>
+
+  <body> 
+    <form action="https://bucket-name.s3.amazonaws.com/" method="post" enctype="multipart/form-data">
+      <input type="hidden" name="key" value="uploads/{{filename}}">
+      <input type="hidden" name="AWSAccessKeyId" value="YOUR_AWS_KEY"> 
+      <input type="hidden" name="acl" value="public-read"> 
+      <input type="hidden" name="success_action_redirect" value="{{return_url}}">
+      <input type="hidden" name="policy" value="{{policy}}">
+      <input type="hidden" name="signature" value="{{signature}}">
+      <input type="hidden" name="Content-Type" value="{$Content-Type}">
+      <h1>File to upload to S3:</h1>
+      <input name="file" type="file"> 
+      <br> 
+      <input type="submit" value="Upload File to S3"> 
+    </form> 
+  </body>
+</html>

+ 69 - 0
S3/new2/appengine-s3-upload-master/upload.py

@@ -0,0 +1,69 @@
+import os
+import base64
+import hmac, sha
+from google.appengine.ext import webapp
+from google.appengine.ext.webapp import template
+from google.appengine.ext.webapp.util import run_wsgi_app
+
+""" A simple file to generate a policy and signature to upload to S3 from appengine """
+
+
+#DEFINE AWS CREDENTIALS
+AWS_KEY = 'YOUR_AWS_KEY'
+AWS_SECRET_KEY = 'YOUR_AWS_SECRET_KEY'
+
+class MainPage(webapp.RequestHandler):
+    
+    def get(self):
+        """ Take the request and create encoded values to allow file upload """
+        
+        file_name = 'file_name'
+        
+        return_url = 'http://localhost:8080'
+        
+        if not return_url:
+            return self.response.out.write('Error: no return defined')
+            
+        #POLICY DOCUMENT - MUST BE STRING
+        policy_document = '''{
+          "expiration": "2015-06-15T12:00:00.000Z",
+          "conditions": [
+            {"bucket": "bucket-name" },
+            ["starts-with", "$key", ""],
+            ["starts-with", "$Content-Type", ""],
+            {"acl": "public-read"},
+            {"success_action_redirect": "%s"}
+          ]
+        }
+        ''' % return_url
+        
+        #policy must be a base64 encoded version of the policy document
+        policy = base64.b64encode(policy_document)
+        
+        #the signature is the policy + the AWS secret key
+        signature = base64.b64encode(
+            hmac.new(AWS_SECRET_KEY, policy, sha).digest())
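+        # Note: the `sha` module is Python 2 only; on Python 3 an equivalent
+        # signature could be computed with hashlib, roughly:
+        #   base64.b64encode(hmac.new(AWS_SECRET_KEY.encode(), policy,
+        #                             hashlib.sha1).digest())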
+                
+        #template values to be passed through to upload.html
+        template_values = {
+                    'policy': policy,
+                    'signature': signature,
+                    'filename': file_name,
+                    'return_url': return_url
+                }
+        
+        #define the template
+        path = os.path.join(os.path.dirname(__file__), 'upload.html')
+        #write out
+        self.response.out.write(template.render(path, template_values))
+
+#only one URL route necessary
+application = webapp.WSGIApplication(
+                                     [('/', MainPage)],
+                                     debug=True)
+
+def main():
+    run_wsgi_app(application)
+
+if __name__ == "__main__":
+    main()

BIN
S3/new2/aws_s3-master.zip


+ 4 - 0
S3/new2/aws_s3-master/README.md

@@ -0,0 +1,4 @@
+# aws_s3
+Access AWS S3 to upload and download files (example).
+
+Check the comments in the file to understand the process. The Boto Python package is used.
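+
+As a quick reference, downloading a key with Boto follows the same pattern the
+script uses for listing. A minimal sketch (bucket, key and path names are the
+placeholders from `s3_access.py`):
+
+```python
+import boto
+
+conn = boto.connect_s3()  # credentials are read from /etc/boto.cfg
+bucket = conn.get_bucket('ember-s3')
+key = bucket.get_key('prediction.json')
+key.get_contents_to_filename('/home/user/Downloads/ember/prediction.json')
+```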

+ 65 - 0
S3/new2/aws_s3-master/s3_access.py

@@ -0,0 +1,65 @@
+# owner: Rohit Kumar
+
+#Install Boto package. sudo apt-get install python-boto
+import boto
+import boto.s3
+import sys
+from boto.s3.key import Key
+
+AWS_ACCESS_KEY_ID = ''
+AWS_SECRET_ACCESS_KEY = ''
+
+bucket_name ='ember-s3'   #Name of the bucket
+testfile = "Memory_Discussion.pdf"
+key_file = 'prediction.json' # name of the file to be accessed on the S3
+download_path = '/home/user/Downloads/ember/prediction.json' ## change this to your local machine path
+
+# Connect to the S3 using the access keys set in /etc/boto.cfg. If not set, follow the these steps:
+# 1) Go to Amazon AWS account-> Click the top right account name -> My security Credentials -> Continue (if pop up arrives) -> Access Keys -> Create New Access Keys -> Download the CSV file
+# Note that you cannot retrieve this file from your account again, so keep it saved someplace
+# 2) Go to the directory where the file is downloaded. Open a terminal from here and type in
+# cp <your key name> /etc/boto.cfg
+# 3) Open /etc/boto.cfg in editor and ensure that it is in following format:
+# 
+# [Credentials]
+# aws_access_key_id = {ACCESS KEY ID}
+# aws_secret_access_key = {SECRET ACCESS KEY}
+#
+# If not, make changes to reflect in the above format
+
+#conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
+conn = boto.connect_s3()
+
+#Count all the buckets
+all_b = conn.get_all_buckets()
+if all_b is None:
+	print ("No bucket exists ")
+
+## Following contains the method to create a new bucket in given location
+#bucket = conn.create_bucket(bucket_name, location=boto.s3.connection.Location.DEFAULT)
+
+##Create a key of given name and  transfer contents to that from local file
+#k = Key(bucket)
+#k.key = key_file
+#k.set_contents_from_filename(testfile, cb=percent_cb, num_cb=10)
+#k.set_acl('public-read')
+#def percent_cb(complete, total):
+#    sys.stdout.write('.')
+#    sys.stdout.flush()
+
+##Download the file from S3
+conn_bucket = conn.get_bucket(bucket_name)
+if conn_bucket is None:
+	print ("Bucket does not exist!")
+else:
+	for key_list in conn_bucket.list():
+		print (key_list.name)
+
+all_files = conn_bucket.list()   #list all keys inside the bucket
+
+#key_file = [i.name for i in conn_bucket.list()]
+#all_files.name.encode('utf-8')[1];
+
+#down_key = conn_bucket.get_key(key_file)
+#down_key.get_contents_to_filename(download_path) 
+

Some files are not shown in this diff because too many files have changed.