; config file for s3_migration_cluster master and worker nodes

[Basic]
JobType = PUT
# JobType = PUT | GET
# PUT means EC2 is not in the same account as the Destination Bucket.
# GET means EC2 is not in the same account as the Source Bucket.

table_queue_name = s3_migration_file_list
# table_queue_name: replaced automatically when deployed via CloudFormation/CDK

sqs_queue_name = s3_migration_file_list
# sqs_queue_name: replaced automatically when deployed via CloudFormation/CDK

ssm_parameter_bucket = s3_migration_bucket_para
# ssm_parameter_bucket: replaced automatically when deployed via CloudFormation/CDK

ssm_parameter_credentials = s3_migration_credentials
; You need to manually create a parameter named "s3_migration_credentials" in SSM
; Parameter Store (CloudFormation/CDK does not create it).
; It holds the access key of the other account, i.e. the account the EC2 instance
; does NOT belong to. For example, if EC2 runs in the Global partition, this is the
; China account access key; if EC2 runs in China, it is the Global account access key.
; Permissions for the EC2 instance's own account are obtained from its IAM Role.
; Example value for this parameter:
;{
;  "aws_access_key_id": "your_aws_access_key_id",
;  "aws_secret_access_key": "your_aws_secret_access_key",
;  "region": "cn-northwest-1"
;}

Des_bucket_default = hawkey999
Des_prefix_default = s3-migration-test/
# The default destination bucket/prefix is only used in the scenario where newly
# added S3 objects trigger SQS automatically. For the scenario where Jobsender scans
# S3 and dispatches jobs, these two settings are not needed; even if set, the program
# uses the destination bucket/prefix carried in the SQS message when present.

[Mode]
StorageClass = STANDARD
# STANDARD|REDUCED_REDUNDANCY|STANDARD_IA|ONEZONE_IA|INTELLIGENT_TIERING|GLACIER|DEEP_ARCHIVE

ResumableThreshold = 5
# Unit: MBytes. For files smaller than this, the program does not look up existing
# multipart uploads on S3 when a transfer starts, i.e. no resuming; it simply
# overwrites, which saves overhead.

MaxRetry = 20
# Maximum number of retries after a single part fails to upload, type = int

MaxThread = 20
# Number of threads working concurrently on a single file, type = int

MaxParallelFile = 5
# Number of files transferred in parallel, type = int

JobTimeout = 3500
# Timeout for transferring a single file, in seconds. The SQS queue's invisible time
# is set to 3600, leaving a 100-second margin.

JobsenderCompareVersionId = False
# True: when Jobsender compares the source/destination bucket lists, it gets the
# destination versionId from DDB.

UpdateVersionId = False
# True: when a Worker starts a new object job (multipart upload), it heads the source
# S3 object to get its current versionId.
# If you disable JobsenderCompareVersionId, the Jobsender sends jobs with a null
# versionId; in that case you should enable UpdateVersionId to fetch the real
# versionId instead of 'null', to avoid the corner case of transferring half of a new
# object and half of an old one.

GetObjectWithVersionId = False
# True: Worker gets objects with a versionId.
# Even if the object has no version, the versionId 'null' still works for getting the
# latest object.
# If the bucket owner only allows you to get objects, but not object versions, disable
# this feature.

[Debug]
CleanUnfinishedUpload = False
# Recommended: False. When an unfinished multipart upload id already exists, it can be
# found and the transfer resumed.
# True: when a transfer starts, automatically clean up all unfinished multipart upload
# ids on S3, i.e. no resuming. If you have changed ChunkSize, you can use this to
# clean up previously uploaded parts.

LoggingLevel = INFO
# Log output level: WARNING | INFO | DEBUG
# WARNING is recommended for daily use.

LocalProfileMode = False
# Usually False.
# True: get credentials from the local profile; False: get them from the EC2 IAM Role.
# If you debug locally on an OS of type Darwin, you don't have to set this to True;
# it is recognized automatically.

ifVerifyMD5Twice = False
# Whether to verify MD5 twice.
# True: after a file's upload completes and its parts are merged, verify the MD5 of
# the whole file against the ETag again.
# For S3_TO_S3 with this switch on, a resumed transfer also re-"downloads" all
# already-transferred parts of the source file to compute the MD5, although it does
# not re-upload them. So with this mode enabled, it is recommended to run worker nodes
# in the same Region as the source S3, otherwise latency is high.
# This switch does not affect per-part verification on upload: even when False, every
# part's MD5 is verified; only the whole-file check after merging is skipped.

# Danger!!! Don't change ChunkSize unless you fully understand how to clean up uploaded parts.
ChunkSize = 5
# Unit: MBytes. File part size, no smaller than 5 MB. If a file would produce more
# than 10,000 parts, the program automatically enlarges the chunk size
# (ChunkSize_auto) for that file before splitting it.
# Changing this parameter is not recommended. If you do change it, be sure to manually
# clean up all parts that were uploaded but never merged, otherwise the final file
# will be corrupted. With ifVerifyMD5Twice = True, the whole-file check detects the
# error and retransmits; with it off, the final file is silently wrong.
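
; ---------------------------------------------------------------------------
; Reference sketch (not read by the program): one way to create and verify the
; "s3_migration_credentials" SSM parameter with boto3, assuming the caller has
; ssm:PutParameter / ssm:GetParameter permission. The key values and region
; below are placeholders, matching the example value shown above.
;
;   import json
;   import boto3
;
;   ssm = boto3.client("ssm")  # runs with the EC2 instance's own credentials
;
;   # Store the OTHER account's credentials as an encrypted SecureString
;   ssm.put_parameter(
;       Name="s3_migration_credentials",
;       Type="SecureString",
;       Overwrite=True,
;       Value=json.dumps({
;           "aws_access_key_id": "your_aws_access_key_id",
;           "aws_secret_access_key": "your_aws_secret_access_key",
;           "region": "cn-northwest-1",
;       }),
;   )
;
;   # Read it back the way a worker would, decrypting on the fly
;   resp = ssm.get_parameter(Name="s3_migration_credentials", WithDecryption=True)
;   credentials = json.loads(resp["Parameter"]["Value"])
; ---------------------------------------------------------------------------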
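
; ---------------------------------------------------------------------------
; Reference sketch (not read by the program): the kind of calculation behind the
; automatic chunk-size enlargement described under ChunkSize. Function and
; variable names here are illustrative, not the program's actual identifiers.
;
;   MAX_PARTS = 10000          # S3 multipart upload hard limit on part count
;   MIN_CHUNK = 5 * 1024**2    # S3 minimum part size: 5 MBytes
;
;   def auto_chunk_size(file_size: int, chunk_size: int) -> int:
;       """Return a part size that keeps the part count within MAX_PARTS."""
;       chunk = max(chunk_size, MIN_CHUNK)
;       while file_size / chunk > MAX_PARTS:
;           chunk *= 2  # double until the file fits into 10,000 parts
;       return chunk
;
;   # e.g. a 100 GB file at the default 5 MB chunks would need ~20,480 parts,
;   # so the chunk size is enlarged to 20 MB (~5,120 parts) before splitting.
; ---------------------------------------------------------------------------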
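
; ---------------------------------------------------------------------------
; Reference sketch (not read by the program): loading this file with Python's
; configparser and coercing the typed options. The filename is a placeholder.
;
;   import configparser
;
;   cfg = configparser.ConfigParser()
;   cfg.read("s3_migration_cluster_config.ini")
;
;   job_type = cfg.get("Basic", "JobType")                   # "PUT" or "GET"
;   max_retry = cfg.getint("Mode", "MaxRetry")                # type = int
;   chunk_size = cfg.getint("Debug", "ChunkSize") * 1024**2   # MBytes -> bytes
;   verify_twice = cfg.getboolean("Debug", "ifVerifyMD5Twice")
; ---------------------------------------------------------------------------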