#!/home/alpcentaur/ProjektA/PrototypeWebApp/venv/bin/python3.5
# Copyright (c) 2006,2007,2008 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import getopt
import sys
import os
import boto
from boto.compat import six

try:
    # multipart portions copyright Fabian Topfstedt
    # https://gist.github.com/924094
    import math
    import mimetypes
    from multiprocessing import Pool
    from boto.s3.connection import S3Connection
    from filechunkio import FileChunkIO
    multipart_capable = True
    usage_flag_multipart_capable = """ [--multipart]"""
    usage_string_multipart_capable = """
  multipart - Upload files as multiple parts. This needs filechunkio.
              Requires ListBucket, ListMultipartUploadParts,
              ListBucketMultipartUploads and PutObject permissions."""
except ImportError as err:
    multipart_capable = False
    usage_flag_multipart_capable = ""
    if six.PY2:
        attribute = 'message'
    else:
        attribute = 'msg'
    usage_string_multipart_capable = '\n\n "' + \
        getattr(err, attribute)[len('No module named '):] + \
        '" is missing for multipart support '

DEFAULT_REGION = 'us-east-1'

usage_string = """
SYNOPSIS
    s3put [-a/--access_key <access_key>] [-s/--secret_key <secret_key>]
          -b/--bucket <bucket_name> [-c/--callback <num_cb>]
          [-d/--debug <debug_level>] [-i/--ignore <ignore_dirs>]
          [-n/--no_op] [-p/--prefix <prefix>] [-k/--key_prefix <key_prefix>]
          [-q/--quiet] [-g/--grant grant] [-w/--no_overwrite] [-r/--reduced]
          [--header] [--region <name>] [--host <s3_host>]""" + \
    usage_flag_multipart_capable + """ path [path...]

    Where
        access_key - Your AWS Access Key ID. If not supplied, boto will
                     use the value of the environment variable
                     AWS_ACCESS_KEY_ID
        secret_key - Your AWS Secret Access Key. If not supplied, boto
                     will use the value of the environment variable
                     AWS_SECRET_ACCESS_KEY
        bucket_name - The name of the S3 bucket the file(s) should be
                      copied to.
        path - A path to a directory or file that represents the items
               to be uploaded. If the path points to an individual file,
               that file will be uploaded to the specified bucket. If the
               path points to a directory, it will recursively traverse
               the directory and upload all files to the specified bucket.
        debug_level - 0 means no debug output (default), 1 means normal
                      debug output from boto, and 2 means boto debug output
                      plus request/response output from httplib
        ignore_dirs - a comma-separated list of directory names that will
                      be ignored and not uploaded to S3.
        num_cb - The number of progress callbacks to display. The default
                 is zero which means no callbacks. If you supplied a value
                 of "-c 10" for example, the progress callback would be
                 called 10 times for each file transferred.
        prefix - A file path prefix that will be stripped from the full
                 path of the file when determining the key name in S3.
                 For example, if the full path of a file is:
                     /home/foo/bar/fie.baz
                 and the prefix is specified as "-p /home/foo/" the
                 resulting key name in S3 will be:
                     /bar/fie.baz
                 The prefix must end in a trailing separator; if it
                 does not, one will be added.
        key_prefix - A prefix to be added to the S3 key name, after any
                     stripping of the file path is done based on the
                     "-p/--prefix" option.
        reduced - Use Reduced Redundancy storage
        grant - A canned ACL policy that will be granted on each file
                transferred to S3. The value provided must be one of the
                "canned" ACL policies supported by S3:
                private|public-read|public-read-write|authenticated-read
        no_overwrite - No files will be overwritten on S3. If the file/key
                       exists on S3 it will be kept. This is useful for
                       resuming interrupted transfers. Note this is not a
                       sync: even if the file has been updated locally,
                       if the key exists on S3 the file on S3 will not be
                       updated.
        header - key=value pairs of extra header(s) to pass along in the
                 request
        region - Manually set a region for buckets that are not in the US
                 classic region. Normally the region is autodetected, but
                 setting this yourself is more efficient.
        host - Hostname override, for using an endpoint other than AWS S3
""" + usage_string_multipart_capable + """

If the -n option is provided, no files will be transferred to S3 but
informational messages will be printed about what would happen.
"""
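
# Example invocation (hypothetical bucket and paths; credentials come from
# the AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY environment variables when
# -a/-s are omitted):
#
#     s3put -b my-bucket -p /home/foo/ /home/foo/bar
#
# This uploads everything under /home/foo/bar, stripping the /home/foo/
# prefix from each key name as described above.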


def usage(status=1):
    print(usage_string)
    sys.exit(status)


def submit_cb(bytes_so_far, total_bytes):
    print('%d bytes transferred / %d bytes total' % (bytes_so_far, total_bytes))
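
# With "-c 10", submit_cb is installed as the progress callback and prints
# lines such as "1048576 bytes transferred / 10485760 bytes total"
# (illustrative numbers) roughly ten times per file.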


def get_key_name(fullpath, prefix, key_prefix):
    if fullpath.startswith(prefix):
        key_name = fullpath[len(prefix):]
    else:
        key_name = fullpath
    parts = key_name.split(os.sep)
    return key_prefix + '/'.join(parts)
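
# Worked example (illustrative values): on a POSIX system,
#     get_key_name('/home/foo/bar/fie.baz', '/home/foo/', 'backup/')
# strips the prefix to get 'bar/fie.baz' and returns 'backup/bar/fie.baz'.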


def _upload_part(bucketname, aws_key, aws_secret, multipart_id, part_num,
                 source_path, offset, bytes, debug, cb, num_cb,
                 amount_of_retries=10):
    """
    Uploads a part with retries.
    """
    if debug == 1:
        print("_upload_part(%s, %s, %s)" % (source_path, offset, bytes))
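
    # Retries are implemented by recursion: each failed attempt calls _upload
    # again with retries_left - 1, so the default amount_of_retries=10 allows
    # up to 11 attempts before the exception is re-raised.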
    def _upload(retries_left=amount_of_retries):
        try:
            if debug == 1:
                print('Start uploading part #%d ...' % part_num)
            conn = S3Connection(aws_key, aws_secret)
            conn.debug = debug
            bucket = conn.get_bucket(bucketname)
            for mp in bucket.get_all_multipart_uploads():
                if mp.id == multipart_id:
                    with FileChunkIO(source_path, 'r', offset=offset,
                                     bytes=bytes) as fp:
                        mp.upload_part_from_file(fp=fp, part_num=part_num,
                                                 cb=cb, num_cb=num_cb)
                    break
        except Exception as exc:
            if retries_left:
                _upload(retries_left=retries_left - 1)
            else:
                print('Failed uploading part #%d' % part_num)
                raise exc
        else:
            if debug == 1:
                print('... Uploaded part #%d' % part_num)

    _upload()


def check_valid_region(conn, region):
    if conn is None:
        print('Invalid region (%s)' % region)
        sys.exit(1)


def multipart_upload(bucketname, aws_key, aws_secret, source_path, keyname,
                     reduced, debug, cb, num_cb, acl='private', headers={},
                     guess_mimetype=True, parallel_processes=4,
                     region=DEFAULT_REGION):
    """
    Parallel multipart upload.
    """
    conn = boto.s3.connect_to_region(region, aws_access_key_id=aws_key,
                                     aws_secret_access_key=aws_secret)
    check_valid_region(conn, region)
    conn.debug = debug
    bucket = conn.get_bucket(bucketname)

    if guess_mimetype:
        mtype = mimetypes.guess_type(keyname)[0] or 'application/octet-stream'
        headers.update({'Content-Type': mtype})

    mp = bucket.initiate_multipart_upload(keyname, headers=headers,
                                          reduced_redundancy=reduced)

    source_size = os.stat(source_path).st_size
    bytes_per_chunk = max(int(math.sqrt(5242880) * math.sqrt(source_size)),
                          5242880)
    chunk_amount = int(math.ceil(source_size / float(bytes_per_chunk)))
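
    # Illustrative arithmetic (not in the original comments): the part size
    # grows with the square root of the file size, floored at 5242880 bytes,
    # S3's minimum part size. For a 1 GiB file this gives
    # sqrt(5242880) * sqrt(1073741824) ~= 75 MB per part, i.e. 15 parts,
    # keeping the part count far below S3's 10,000-part limit.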
    pool = Pool(processes=parallel_processes)
    for i in range(chunk_amount):
        offset = i * bytes_per_chunk
        remaining_bytes = source_size - offset
        bytes = min([bytes_per_chunk, remaining_bytes])
        part_num = i + 1
        pool.apply_async(_upload_part, [bucketname, aws_key, aws_secret, mp.id,
                                        part_num, source_path, offset, bytes,
                                        debug, cb, num_cb])
    pool.close()
    pool.join()
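
    # If any part failed all of its retries, fewer parts than expected will
    # have been registered, so the upload is canceled instead of completed.
    # Canceling also tells S3 to discard the parts already stored for this
    # upload, which would otherwise keep occupying (billable) space.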
    if len(mp.get_all_parts()) == chunk_amount:
        mp.complete_upload()
        key = bucket.get_key(keyname)
        key.set_acl(acl)
    else:
        mp.cancel_upload()


def singlepart_upload(bucket, key_name, fullpath, *kargs, **kwargs):
    """
    Single upload.
    """
    k = bucket.new_key(key_name)
    k.set_contents_from_filename(fullpath, *kargs, **kwargs)
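
# Extra positional and keyword arguments are passed straight through to
# boto's Key.set_contents_from_filename, e.g. (hypothetical call):
#     singlepart_upload(bucket, 'bar/fie.baz', '/home/foo/bar/fie.baz',
#                       policy='public-read', reduced_redundancy=False)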


def expand_path(path):
    path = os.path.expanduser(path)
    path = os.path.expandvars(path)
    return os.path.abspath(path)
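
# For example (result depends on the caller's environment):
# expand_path('~/data') and expand_path('$HOME/data') both resolve to
# something like '/home/alice/data'.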


def main():

    # default values
    aws_access_key_id = None
    aws_secret_access_key = None
    bucket_name = ''
    ignore_dirs = []
    debug = 0
    cb = None
    num_cb = 0
    quiet = False
    no_op = False
    prefix = '/'
    key_prefix = ''
    grant = None
    no_overwrite = False
    reduced = False
    headers = {}
    host = None
    multipart_requested = False
    region = None

    try:
        opts, args = getopt.getopt(
            sys.argv[1:], 'a:b:c:d:g:hi:k:np:qs:wr',
            ['access_key=', 'bucket=', 'callback=', 'debug=', 'help', 'grant=',
             'ignore=', 'key_prefix=', 'no_op', 'prefix=', 'quiet',
             'secret_key=', 'no_overwrite', 'reduced', 'header=', 'multipart',
             'host=', 'region='])
    except getopt.GetoptError:
        usage(1)
    # parse opts
    for o, a in opts:
        if o in ('-h', '--help'):
            usage(0)
        if o in ('-a', '--access_key'):
            aws_access_key_id = a
        if o in ('-b', '--bucket'):
            bucket_name = a
        if o in ('-c', '--callback'):
            num_cb = int(a)
            cb = submit_cb
        if o in ('-d', '--debug'):
            debug = int(a)
        if o in ('-g', '--grant'):
            grant = a
        if o in ('-i', '--ignore'):
            ignore_dirs = a.split(',')
        if o in ('-n', '--no_op'):
            no_op = True
        if o in ('-w', '--no_overwrite'):
            no_overwrite = True
        if o in ('-p', '--prefix'):
            prefix = a
            if prefix[-1] != os.sep:
                prefix = prefix + os.sep
            prefix = expand_path(prefix)
        if o in ('-k', '--key_prefix'):
            key_prefix = a
        if o in ('-q', '--quiet'):
            quiet = True
        if o in ('-s', '--secret_key'):
            aws_secret_access_key = a
        if o in ('-r', '--reduced'):
            reduced = True
        if o == '--header':
            (k, v) = a.split("=", 1)
            headers[k] = v
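            # For example, --header "Cache-Control=max-age=3600" (illustrative
            # value) yields headers == {'Cache-Control': 'max-age=3600'};
            # splitting on the first '=' only lets the value itself contain '='.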
        if o == '--host':
            host = a
        if o == '--multipart':
            if multipart_capable:
                multipart_requested = True
            else:
                print("multipart upload requested but not capable")
                sys.exit(4)
        if o == '--region':
            regions = boto.s3.regions()
            for region_info in regions:
                if region_info.name == a:
                    region = a
                    break
            else:
                raise ValueError('Invalid region %s specified' % a)

    if len(args) < 1:
        usage(2)

    if not bucket_name:
        print("bucket name is required!")
        usage(3)

    connect_args = {
        'aws_access_key_id': aws_access_key_id,
        'aws_secret_access_key': aws_secret_access_key
    }

    if host:
        connect_args['host'] = host

    c = boto.s3.connect_to_region(region or DEFAULT_REGION, **connect_args)
    check_valid_region(c, region or DEFAULT_REGION)
    c.debug = debug
    b = c.get_bucket(bucket_name, validate=False)
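    # validate=False skips boto's bucket-existence check, so the bucket object
    # can be created even if the initial connection points at the wrong
    # region; the region is corrected below when it can be detected.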

    # Attempt to determine location and warn if no --host or --region
    # arguments were passed. Then try to automagically figure out
    # what should have been passed and fix it.
    if host is None and region is None:
        try:
            location = b.get_location()

            # Classic region will be '', any other will have a name
            if location:
                print('Bucket exists in %s but no host or region given!' % location)

                # Override for EU, which is really Ireland according to the docs
                if location == 'EU':
                    location = 'eu-west-1'

                print('Automatically setting region to %s' % location)

                # Here we create a new connection, and then take the existing
                # bucket and set it to use the new connection
                c = boto.s3.connect_to_region(location, **connect_args)
                c.debug = debug
                b.connection = c
        except Exception as e:
            if debug > 0:
                print(e)
            print('Could not get bucket region info, skipping...')

    existing_keys_to_check_against = []
    files_to_check_for_upload = []

    for path in args:
        path = expand_path(path)
        # upload a directory of files recursively
        if os.path.isdir(path):
            if no_overwrite:
                if not quiet:
                    print('Getting list of existing keys to check against')
                for key in b.list(get_key_name(path, prefix, key_prefix)):
                    existing_keys_to_check_against.append(key.name)
            for root, dirs, files in os.walk(path):
                for ignore in ignore_dirs:
                    if ignore in dirs:
                        dirs.remove(ignore)
                for path in files:
                    if path.startswith("."):
                        continue
                    files_to_check_for_upload.append(os.path.join(root, path))

        # upload a single file
        elif os.path.isfile(path):
            fullpath = os.path.abspath(path)
            key_name = get_key_name(fullpath, prefix, key_prefix)
            files_to_check_for_upload.append(fullpath)
            existing_keys_to_check_against.append(key_name)

        # we are trying to upload something unknown
        else:
            print("I don't know what %s is, so I can't upload it" % path)

    for fullpath in files_to_check_for_upload:
        key_name = get_key_name(fullpath, prefix, key_prefix)

        if no_overwrite and key_name in existing_keys_to_check_against:
            if b.get_key(key_name):
                if not quiet:
                    print('Skipping %s as it exists in s3' % fullpath)
                continue

        if not quiet:
            print('Copying %s to %s/%s' % (fullpath, bucket_name, key_name))

        if not no_op:
            # 0-byte files don't work and also don't need multipart upload
            if os.stat(fullpath).st_size != 0 and multipart_capable and \
                    multipart_requested:
                multipart_upload(bucket_name, aws_access_key_id,
                                 aws_secret_access_key, fullpath, key_name,
                                 reduced, debug, cb, num_cb,
                                 grant or 'private', headers,
                                 region=region or DEFAULT_REGION)
            else:
                singlepart_upload(b, key_name, fullpath, cb=cb, num_cb=num_cb,
                                  policy=grant, reduced_redundancy=reduced,
                                  headers=headers)


if __name__ == "__main__":
    main()