# -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Radim Rehurek <me@radimrehurek.com>
#
# This code is distributed under the terms and conditions
# from the MIT License (MIT).
#
"""Implements file-like objects for reading and writing from/to AWS S3."""

import io
import functools
import logging
import time
import warnings

try:
    import boto3
    import botocore.client
    import botocore.exceptions
    import urllib3.exceptions
except ImportError:
    MISSING_DEPS = True

import smart_open.bytebuffer
import smart_open.concurrency
import smart_open.utils

from smart_open import constants

logger = logging.getLogger(__name__)

DEFAULT_MIN_PART_SIZE = 50 * 1024**2
"""Default minimum part size for S3 multipart uploads"""
MIN_MIN_PART_SIZE = 5 * 1024 ** 2
"""The absolute minimum permitted by Amazon."""

SCHEMES = ("s3", "s3n", "s3u", "s3a")
DEFAULT_PORT = 443
DEFAULT_HOST = 's3.amazonaws.com'

DEFAULT_BUFFER_SIZE = 128 * 1024

URI_EXAMPLES = (
    's3://my_bucket/my_key',
    's3://my_key:my_secret@my_bucket/my_key',
    's3://my_key:my_secret@my_server:my_port@my_bucket/my_key',
)

_UPLOAD_ATTEMPTS = 6
_SLEEP_SECONDS = 10

# Returned by AWS when we try to seek beyond EOF.
_OUT_OF_RANGE = 'InvalidRange'


class _ClientWrapper:
    """Wraps a client to inject the appropriate keyword args into each method call.

    The keyword args are a dictionary keyed by the fully qualified method name.
    For example, S3.Client.create_multipart_upload.

    See https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#client

    This wrapper behaves identically to the client otherwise.
    """
    def __init__(self, client, kwargs):
        self.client = client
        self.kwargs = kwargs

    def __getattr__(self, method_name):
        method = getattr(self.client, method_name)
        kwargs = self.kwargs.get('S3.Client.%s' % method_name, {})
        return functools.partial(method, **kwargs)


def parse_uri(uri_as_string):
    #
    # Restrictions on bucket names and labels:
    #
    # - Bucket names must be at least 3 and no more than 63 characters long.
    # - Bucket names must be a series of one or more labels.
    # - Adjacent labels are separated by a single period (.).
    # - Bucket names can contain lowercase letters, numbers, and hyphens.
    # - Each label must start and end with a lowercase letter or a number.
    #
    # We use the above as a guide only, and do not perform any validation.  We
    # let boto3 take care of that for us.
    #
    split_uri = smart_open.utils.safe_urlsplit(uri_as_string)
    assert split_uri.scheme in SCHEMES

    port = DEFAULT_PORT
    host = DEFAULT_HOST
    ordinary_calling_format = False
    #
    # These defaults tell boto3 to look for credentials elsewhere
    #
    access_id, access_secret = None, None

    #
    # Common URI template [secret:key@][host[:port]@]bucket/object
    #
    # The urlparse function doesn't handle the above schema, so we have to do
    # it ourselves.
    #
    uri = split_uri.netloc + split_uri.path

    if '@' in uri and ':' in uri.split('@')[0]:
        auth, uri = uri.split('@', 1)
        access_id, access_secret = auth.split(':')

    head, key_id = uri.split('/', 1)
    if '@' in head and ':' in head:
        ordinary_calling_format = True
        host_port, bucket_id = head.split('@')
        host, port = host_port.split(':', 1)
        port = int(port)
    elif '@' in head:
        ordinary_calling_format = True
        host, bucket_id = head.split('@')
    else:
        bucket_id = head

    return dict(
        scheme=split_uri.scheme,
        bucket_id=bucket_id,
        key_id=key_id,
        port=port,
        host=host,
        ordinary_calling_format=ordinary_calling_format,
        access_id=access_id,
        access_secret=access_secret,
    )
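

#
# For illustration only (not part of the original module): given the URI
# template above, parse_uri produces a dict like the following.  The names
# here are made up.
#
#     >>> parse_uri('s3://my_key:my_secret@my-server:9000@my-bucket/my_key')
#     {'scheme': 's3', 'bucket_id': 'my-bucket', 'key_id': 'my_key',
#      'port': 9000, 'host': 'my-server', 'ordinary_calling_format': True,
#      'access_id': 'my_key', 'access_secret': 'my_secret'}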


def _consolidate_params(uri, transport_params):
    """Consolidates the parsed URI with the additional parameters.

    This is necessary because the user can pass some of the parameters in
    two different ways:

    1) Via the URI itself
    2) Via the transport parameters

    These are not mutually exclusive, but we have to pick one over the other
    in a sensible way in order to proceed.
    """
    transport_params = dict(transport_params)

    def inject(**kwargs):
        try:
            client_kwargs = transport_params['client_kwargs']
        except KeyError:
            client_kwargs = transport_params['client_kwargs'] = {}

        try:
            init_kwargs = client_kwargs['S3.Client']
        except KeyError:
            init_kwargs = client_kwargs['S3.Client'] = {}

        init_kwargs.update(**kwargs)

    client = transport_params.get('client')
    if client is not None and (uri['access_id'] or uri['access_secret']):
        logger.warning(
            'ignoring credentials parsed from URL because they conflict with '
            'transport_params["client"]. Set transport_params["client"] to None '
            'to suppress this warning.'
        )
        uri.update(access_id=None, access_secret=None)
    elif uri['access_id'] and uri['access_secret']:
        inject(
            aws_access_key_id=uri['access_id'],
            aws_secret_access_key=uri['access_secret'],
        )
        uri.update(access_id=None, access_secret=None)

    if client is not None and uri['host'] != DEFAULT_HOST:
        logger.warning(
            'ignoring endpoint_url parsed from URL because it conflicts with '
            'transport_params["client"]. Set transport_params["client"] to None '
            'to suppress this warning.'
        )
        uri.update(host=None)
    elif uri['host'] != DEFAULT_HOST:
        inject(endpoint_url='https://%(host)s:%(port)d' % uri)
        uri.update(host=None)

    return uri, transport_params
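

#
# A sketch of the consolidation (illustrative, not from the original module):
# credentials parsed from the URI end up as client kwargs, which
# _initialize_boto3 later passes to boto3.client('s3', ...).
#
#     uri = parse_uri('s3://my_key:my_secret@my-bucket/my_key')
#     uri, tparams = _consolidate_params(uri, {})
#     assert tparams['client_kwargs']['S3.Client'] == {
#         'aws_access_key_id': 'my_key',
#         'aws_secret_access_key': 'my_secret',
#     }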


def open_uri(uri, mode, transport_params):
    deprecated = (
        'multipart_upload_kwargs',
        'object_kwargs',
        'resource',
        'resource_kwargs',
        'session',
        'singlepart_upload_kwargs',
    )
    detected = [k for k in deprecated if k in transport_params]
    if detected:
        doc_url = (
            'https://github.com/RaRe-Technologies/smart_open/blob/develop/'
            'MIGRATING_FROM_OLDER_VERSIONS.rst'
        )
        #
        # We use warnings.warn with UserWarning instead of logger.warn here because
        #
        # 1) Not everyone has logging enabled; and
        # 2) check_kwargs (below) already uses logger.warn with a similar message
        #
        # https://github.com/RaRe-Technologies/smart_open/issues/614
        #
        message = (
            'ignoring the following deprecated transport parameters: %r. '
            'See <%s> for details' % (detected, doc_url)
        )
        warnings.warn(message, UserWarning)

    parsed_uri = parse_uri(uri)
    parsed_uri, transport_params = _consolidate_params(parsed_uri, transport_params)
    kwargs = smart_open.utils.check_kwargs(open, transport_params)
    return open(parsed_uri['bucket_id'], parsed_uri['key_id'], mode, **kwargs)


def open(
    bucket_id,
    key_id,
    mode,
    version_id=None,
    buffer_size=DEFAULT_BUFFER_SIZE,
    min_part_size=DEFAULT_MIN_PART_SIZE,
    multipart_upload=True,
    defer_seek=False,
    client=None,
    client_kwargs=None,
    writebuffer=None,
):
    """Open an S3 object for reading or writing.

    Parameters
    ----------
    bucket_id: str
        The name of the bucket this object resides in.
    key_id: str
        The name of the key within the bucket.
    mode: str
        The mode for opening the object.  Must be either "rb" or "wb".
    buffer_size: int, optional
        The buffer size to use when performing I/O.
    min_part_size: int, optional
        The minimum part size for multipart uploads.  For writing only.
    multipart_upload: bool, optional
        Default: `True`

        If set to `True`, will use multipart upload for writing to S3.  If set
        to `False`, S3 upload will use the S3 Single-Part Upload API, which
        is better suited to small files.

        For writing only.
    version_id: str, optional
        Version of the object, used when reading the object.
        If None, will fetch the most recent version.
    defer_seek: boolean, optional
        Default: `False`

        If set to `True` on a file opened for reading, GetObject will not be
        called until the first seek() or read().
        Avoids redundant API queries when seeking before reading.
    client: object, optional
        The S3 client to use when working with boto3.
        If you don't specify this, then smart_open will create a new client for you.
    client_kwargs: dict, optional
        Additional parameters to pass to the relevant functions of the client.
        The keys are fully qualified method names, e.g. `S3.Client.create_multipart_upload`.
        The values are kwargs to pass to that method each time it is called.
    writebuffer: IO[bytes], optional
        By default, this module will buffer data in memory using io.BytesIO
        when writing.  Pass another binary IO instance here to use it instead.
        For example, you may pass a file object to buffer to local disk instead
        of in RAM.  Use this to keep RAM usage low at the expense of additional
        disk IO.  If you pass in an open file, then you are responsible for
        cleaning it up after writing completes.
    """
    logger.debug('%r', locals())

    if mode not in constants.BINARY_MODES:
        raise NotImplementedError('bad mode: %r expected one of %r' % (mode, constants.BINARY_MODES))

    if (mode == constants.WRITE_BINARY) and (version_id is not None):
        raise ValueError("version_id must be None when writing")

    if mode == constants.READ_BINARY:
        fileobj = Reader(
            bucket_id,
            key_id,
            version_id=version_id,
            buffer_size=buffer_size,
            defer_seek=defer_seek,
            client=client,
            client_kwargs=client_kwargs,
        )
    elif mode == constants.WRITE_BINARY:
        if multipart_upload:
            fileobj = MultipartWriter(
                bucket_id,
                key_id,
                min_part_size=min_part_size,
                client=client,
                client_kwargs=client_kwargs,
                writebuffer=writebuffer,
            )
        else:
            fileobj = SinglepartWriter(
                bucket_id,
                key_id,
                client=client,
                client_kwargs=client_kwargs,
                writebuffer=writebuffer,
            )
    else:
        assert False, 'unexpected mode: %r' % mode

    fileobj.name = key_id
    return fileobj
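

#
# Usage sketch (not part of the original module; the bucket and key names are
# made up, and the calls assume your boto3 credentials are configured):
#
#     with open('my-bucket', 'my-key.txt', 'wb') as fout:
#         fout.write(b'hello world')
#
#     with open('my-bucket', 'my-key.txt', 'rb') as fin:
#         assert fin.read() == b'hello world'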


def _get(client, bucket, key, version, range_string):
    try:
        if version:
            return client.get_object(Bucket=bucket, Key=key, VersionId=version, Range=range_string)
        else:
            return client.get_object(Bucket=bucket, Key=key, Range=range_string)
    except botocore.client.ClientError as error:
        wrapped_error = IOError(
            'unable to access bucket: %r key: %r version: %r error: %s' % (
                bucket, key, version, error
            )
        )
        wrapped_error.backend_error = error
        raise wrapped_error from error


def _unwrap_ioerror(ioe):
    """Given an IOError from _get, return the 'Error' dictionary from boto."""
    try:
        return ioe.backend_error.response['Error']
    except (AttributeError, KeyError):
        return None


class _SeekableRawReader(object):
    """Read an S3 object.

    This class is internal to the S3 submodule.
    """

    def __init__(
        self,
        client,
        bucket,
        key,
        version_id=None,
    ):
        self._client = client
        self._bucket = bucket
        self._key = key
        self._version_id = version_id

        self._content_length = None
        self._position = 0
        self._body = None

    def seek(self, offset, whence=constants.WHENCE_START):
        """Seek to the specified position.

        :param int offset: The offset in bytes.
        :param int whence: Where the offset is from.

        :returns: the position after seeking.
        :rtype: int
        """
        if whence not in constants.WHENCE_CHOICES:
            raise ValueError('invalid whence, expected one of %r' % constants.WHENCE_CHOICES)

        #
        # Close the old body explicitly.
        # On the first seek() after __init__(), self._body does not exist yet.
        #
        if self._body is not None:
            self._body.close()
        self._body = None

        start = None
        stop = None
        if whence == constants.WHENCE_START:
            start = max(0, offset)
        elif whence == constants.WHENCE_CURRENT:
            start = max(0, offset + self._position)
        else:
            stop = max(0, -offset)

        #
        # If we can figure out that we've read past the EOF, then we can save
        # an extra API call.
        #
        if self._content_length is None:
            reached_eof = False
        elif start is not None and start >= self._content_length:
            reached_eof = True
        elif stop == 0:
            reached_eof = True
        else:
            reached_eof = False

        if reached_eof:
            self._body = io.BytesIO()
            self._position = self._content_length
        else:
            self._open_body(start, stop)

        return self._position

    def _open_body(self, start=None, stop=None):
        """Open a connection to download the specified range of bytes.  Store
        the open file handle in self._body.

        If no range is specified, start defaults to self._position.
        start and stop follow the semantics of the HTTP range header: a stop
        without a start is a suffix range, i.e. it requests the last `stop`
        bytes of the object.

        As a side effect, set self._content_length.  Set self._position
        to self._content_length if start is past end of file.
        """
        if start is None and stop is None:
            start = self._position
        range_string = smart_open.utils.make_range_string(start, stop)

        try:
            # Optimistically try to fetch the requested content range.
            response = _get(
                self._client,
                self._bucket,
                self._key,
                self._version_id,
                range_string,
            )
        except IOError as ioe:
            # Handle requested content range exceeding content size.
            error_response = _unwrap_ioerror(ioe)
            if error_response is None or error_response.get('Code') != _OUT_OF_RANGE:
                raise
            self._position = self._content_length = int(error_response['ActualObjectSize'])
            self._body = io.BytesIO()
        else:
            #
            # Keep track of how many times boto3's built-in retry mechanism
            # activated.
            #
            # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html#checking-retry-attempts-in-an-aws-service-response
            #
            logger.debug(
                '%s: RetryAttempts: %d',
                self,
                response['ResponseMetadata']['RetryAttempts'],
            )
            units, start, stop, length = smart_open.utils.parse_content_range(response['ContentRange'])
            self._content_length = length
            self._position = start
            self._body = response['Body']

    def read(self, size=-1):
        """Read from the continuous connection with the remote peer."""
        if self._body is None:
            # This is necessary for the very first read() after __init__().
            self._open_body()
        if self._position >= self._content_length:
            return b''

        #
        # Boto3 has built-in error handling and retry mechanisms:
        #
        # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html
        # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/retries.html
        #
        # Unfortunately, it isn't always enough.  There is still a non-zero
        # possibility that an exception will slip past these mechanisms and
        # terminate the read prematurely.  Luckily, at this stage, it's very
        # simple to recover from the problem: wait a little bit, reopen the
        # HTTP connection and try again.  Usually, a single retry attempt is
        # enough to recover, but we try multiple times "just in case".
        #
        for attempt, seconds in enumerate([1, 2, 4, 8, 16], 1):
            try:
                if size == -1:
                    binary = self._body.read()
                else:
                    binary = self._body.read(size)
            except (
                ConnectionResetError,
                botocore.exceptions.BotoCoreError,
                urllib3.exceptions.HTTPError,
            ) as err:
                logger.warning(
                    '%s: caught %r while reading %d bytes, sleeping %ds before retry',
                    self,
                    err,
                    size,
                    seconds,
                )
                time.sleep(seconds)
                self._open_body()
            else:
                self._position += len(binary)
                return binary

        raise IOError('%s: failed to read %d bytes after %d attempts' % (self, size, attempt))

    def __str__(self):
        return 'smart_open.s3._SeekableRawReader(%r, %r)' % (self._bucket, self._key)


def _initialize_boto3(rw, client, client_kwargs, bucket, key):
    """Create the required objects for accessing S3.  Ideally, they have
    been already created for us and we can just reuse them."""
    if client_kwargs is None:
        client_kwargs = {}

    if client is None:
        init_kwargs = client_kwargs.get('S3.Client', {})
        client = boto3.client('s3', **init_kwargs)
    assert client

    rw._client = _ClientWrapper(client, client_kwargs)
    rw._bucket = bucket
    rw._key = key


class Reader(io.BufferedIOBase):
    """Reads bytes from S3.

    Implements the io.BufferedIOBase interface of the standard library."""

    def __init__(
        self,
        bucket,
        key,
        version_id=None,
        buffer_size=DEFAULT_BUFFER_SIZE,
        line_terminator=constants.BINARY_NEWLINE,
        defer_seek=False,
        client=None,
        client_kwargs=None,
    ):
        self._version_id = version_id
        self._buffer_size = buffer_size

        _initialize_boto3(self, client, client_kwargs, bucket, key)

        self._raw_reader = _SeekableRawReader(
            self._client,
            bucket,
            key,
            self._version_id,
        )
        self._current_pos = 0
        self._buffer = smart_open.bytebuffer.ByteBuffer(buffer_size)
        self._eof = False
        self._line_terminator = line_terminator

        #
        # This member is part of the io.BufferedIOBase interface.
        #
        self.raw = None

        if not defer_seek:
            self.seek(0)

    #
    # io.BufferedIOBase methods.
    #
    def close(self):
        """Flush and close this stream."""
        pass

    def readable(self):
        """Return True if the stream can be read from."""
        return True

    def read(self, size=-1):
        """Read up to size bytes from the object and return them."""
        if size == 0:
            return b''
        elif size < 0:
            # call read() before setting _current_pos to make sure _content_length is set
            out = self._read_from_buffer() + self._raw_reader.read()
            self._current_pos = self._raw_reader._content_length
            return out

        #
        # Return unused data first
        #
        if len(self._buffer) >= size:
            return self._read_from_buffer(size)

        #
        # If the stream is finished, return what we have.
        #
        if self._eof:
            return self._read_from_buffer()

        self._fill_buffer(size)
        return self._read_from_buffer(size)

    def read1(self, size=-1):
        """This is the same as read()."""
        return self.read(size=size)

    def readinto(self, b):
        """Read up to len(b) bytes into b, and return the number of bytes read."""
        data = self.read(len(b))
        if not data:
            return 0
        b[:len(data)] = data
        return len(data)

    def readline(self, limit=-1):
        """Read up to and including the next newline.  Returns the bytes read."""
        if limit != -1:
            raise NotImplementedError('limits other than -1 not implemented yet')

        #
        # A single line may span multiple buffers.
        #
        line = io.BytesIO()
        while not (self._eof and len(self._buffer) == 0):
            line_part = self._buffer.readline(self._line_terminator)
            line.write(line_part)
            self._current_pos += len(line_part)

            if line_part.endswith(self._line_terminator):
                break
            else:
                self._fill_buffer()

        return line.getvalue()

    def seekable(self):
        """If False, seek(), tell() and truncate() will raise IOError.

        We offer only seek support, and no truncate support."""
        return True

    def seek(self, offset, whence=constants.WHENCE_START):
        """Seek to the specified position.

        :param int offset: The offset in bytes.
        :param int whence: Where the offset is from.

        Returns the position after seeking."""
        # Convert relative offset to absolute, since self._raw_reader
        # doesn't know our current position.
        if whence == constants.WHENCE_CURRENT:
            whence = constants.WHENCE_START
            offset += self._current_pos

        self._current_pos = self._raw_reader.seek(offset, whence)

        self._buffer.empty()
        self._eof = self._current_pos == self._raw_reader._content_length
        return self._current_pos

    def tell(self):
        """Return the current position within the file."""
        return self._current_pos

    def truncate(self, size=None):
        """Unsupported."""
        raise io.UnsupportedOperation

    def detach(self):
        """Unsupported."""
        raise io.UnsupportedOperation

    def terminate(self):
        """Do nothing."""
        pass

    def to_boto3(self, resource):
        """Create an **independent** `boto3.s3.Object` instance that points to
        the same S3 object as this instance.

        Changes to the returned object will not affect the current instance.
        """
        assert resource, 'resource must be a boto3.resource instance'
        obj = resource.Object(self._bucket, self._key)
        if self._version_id is not None:
            return obj.Version(self._version_id)
        else:
            return obj

    #
    # Internal methods.
    #
    def _read_from_buffer(self, size=-1):
        """Remove at most size bytes from our buffer and return them."""
        size = size if size >= 0 else len(self._buffer)
        part = self._buffer.read(size)
        self._current_pos += len(part)
        return part

    def _fill_buffer(self, size=-1):
        size = max(size, self._buffer._chunk_size)
        while len(self._buffer) < size and not self._eof:
            bytes_read = self._buffer.fill(self._raw_reader)
            if bytes_read == 0:
                logger.debug('%s: reached EOF while filling buffer', self)
                self._eof = True

    def __str__(self):
        return "smart_open.s3.Reader(%r, %r)" % (self._bucket, self._key)

    def __repr__(self):
        return (
            "smart_open.s3.Reader("
            "bucket=%r, "
            "key=%r, "
            "version_id=%r, "
            "buffer_size=%r, "
            "line_terminator=%r)"
        ) % (
            self._bucket,
            self._key,
            self._version_id,
            self._buffer_size,
            self._line_terminator,
        )
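

#
# Reading sketch (made-up names; assumes configured credentials).  With
# defer_seek=True, the first GetObject call happens at the first read() or
# seek() rather than at construction time:
#
#     reader = Reader('my-bucket', 'my-key.bin', defer_seek=True)
#     header = reader.read(16)        # first S3 call happens here
#     reader.seek(0)                  # re-opens the body at offset 0
#     line = reader.readline()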


class MultipartWriter(io.BufferedIOBase):
    """Writes bytes to S3 using the multipart API.

    Implements the io.BufferedIOBase interface of the standard library."""

    def __init__(
        self,
        bucket,
        key,
        min_part_size=DEFAULT_MIN_PART_SIZE,
        client=None,
        client_kwargs=None,
        writebuffer=None,
    ):
        if min_part_size < MIN_MIN_PART_SIZE:
            logger.warning(
                "S3 requires minimum part size >= 5MB; multipart upload may fail"
            )
        self._min_part_size = min_part_size

        _initialize_boto3(self, client, client_kwargs, bucket, key)

        try:
            partial = functools.partial(
                self._client.create_multipart_upload,
                Bucket=bucket,
                Key=key,
            )
            self._upload_id = _retry_if_failed(partial)['UploadId']
        except botocore.client.ClientError as error:
            raise ValueError(
                'the bucket %r does not exist, or is forbidden for access (%r)' % (
                    bucket, error
                )
            ) from error

        if writebuffer is None:
            self._buf = io.BytesIO()
        else:
            self._buf = writebuffer

        self._total_bytes = 0
        self._total_parts = 0
        self._parts = []

        #
        # This member is part of the io.BufferedIOBase interface.
        #
        self.raw = None

    def flush(self):
        pass

    #
    # Override some methods from io.IOBase.
    #
    def close(self):
        if self._buf.tell():
            self._upload_next_part()

        if self._total_bytes and self._upload_id:
            partial = functools.partial(
                self._client.complete_multipart_upload,
                Bucket=self._bucket,
                Key=self._key,
                UploadId=self._upload_id,
                MultipartUpload={'Parts': self._parts},
            )
            _retry_if_failed(partial)
            logger.debug('%s: completed multipart upload', self)
        elif self._upload_id:
            #
            # AWS complains with "The XML you provided was not well-formed or
            # did not validate against our published schema" when the input is
            # completely empty => abort the upload, no file created.
            #
            # We work around this by creating an empty file explicitly.
            #
            assert self._upload_id, "no multipart upload in progress"
            self._client.abort_multipart_upload(
                Bucket=self._bucket,
                Key=self._key,
                UploadId=self._upload_id,
            )
            self._client.put_object(
                Bucket=self._bucket,
                Key=self._key,
                Body=b'',
            )
            logger.debug('%s: wrote 0 bytes to imitate multipart upload', self)
        self._upload_id = None

    @property
    def closed(self):
        return self._upload_id is None

    def writable(self):
        """Return True if the stream supports writing."""
        return True

    def seekable(self):
        """If False, seek(), tell() and truncate() will raise IOError.

        We offer only tell support, and no seek or truncate support."""
        return True

    def seek(self, offset, whence=constants.WHENCE_START):
        """Unsupported."""
        raise io.UnsupportedOperation

    def truncate(self, size=None):
        """Unsupported."""
        raise io.UnsupportedOperation

    def tell(self):
        """Return the current stream position."""
        return self._total_bytes

    #
    # io.BufferedIOBase methods.
    #
    def detach(self):
        raise io.UnsupportedOperation("detach() not supported")

    def write(self, b):
        """Write the given buffer (bytes, bytearray, memoryview or any buffer
        interface implementation) to the S3 file.

        For more information about buffers, see https://docs.python.org/3/c-api/buffer.html

        There's buffering happening under the covers, so this may not actually
        do any HTTP transfer right away."""
        length = self._buf.write(b)
        self._total_bytes += length

        if self._buf.tell() >= self._min_part_size:
            self._upload_next_part()

        return length

    def terminate(self):
        """Cancel the underlying multipart upload."""
        assert self._upload_id, "no multipart upload in progress"
        self._client.abort_multipart_upload(
            Bucket=self._bucket,
            Key=self._key,
            UploadId=self._upload_id,
        )
        self._upload_id = None

    def to_boto3(self, resource):
        """Create an **independent** `boto3.s3.Object` instance that points to
        the same S3 object as this instance.

        Changes to the returned object will not affect the current instance.
        """
        assert resource, 'resource must be a boto3.resource instance'
        return resource.Object(self._bucket, self._key)

    #
    # Internal methods.
    #
    def _upload_next_part(self):
        part_num = self._total_parts + 1
        logger.info(
            "%s: uploading part_num: %i, %i bytes (total %.3fGB)",
            self,
            part_num,
            self._buf.tell(),
            self._total_bytes / 1024.0 ** 3,
        )
        self._buf.seek(0)

        #
        # Network problems in the middle of an upload are particularly
        # troublesome.  We don't want to abort the entire upload just because
        # of a temporary connection problem, so this part needs to be
        # especially robust.
        #
        upload = _retry_if_failed(
            functools.partial(
                self._client.upload_part,
                Bucket=self._bucket,
                Key=self._key,
                UploadId=self._upload_id,
                PartNumber=part_num,
                Body=self._buf,
            )
        )

        self._parts.append({'ETag': upload['ETag'], 'PartNumber': part_num})
        logger.debug("%s: upload of part_num #%i finished", self, part_num)

        self._total_parts += 1

        self._buf.seek(0)
        self._buf.truncate(0)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            self.terminate()
        else:
            self.close()

    def __str__(self):
        return "smart_open.s3.MultipartWriter(%r, %r)" % (self._bucket, self._key)

    def __repr__(self):
        return "smart_open.s3.MultipartWriter(bucket=%r, key=%r, min_part_size=%r)" % (
            self._bucket,
            self._key,
            self._min_part_size,
        )
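

#
# Writing sketch (made-up names).  Parts accumulate in the write buffer and
# are shipped via upload_part whenever the buffer reaches min_part_size;
# close() uploads the final part and completes the upload, while an exception
# inside the with-block triggers terminate(), aborting the upload:
#
#     with MultipartWriter('my-bucket', 'my-key.bin') as fout:
#         fout.write(b'x' * DEFAULT_MIN_PART_SIZE)   # flushes one full part
#         fout.write(b'tail')                        # uploaded on close()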


class SinglepartWriter(io.BufferedIOBase):
    """Writes bytes to S3 using the single-part API.

    Implements the io.BufferedIOBase interface of the standard library.

    This class buffers all of its input in memory until its `close` method is
    called.  Only then is the data written to S3 and the buffer released."""

    def __init__(
        self,
        bucket,
        key,
        client=None,
        client_kwargs=None,
        writebuffer=None,
    ):
        _initialize_boto3(self, client, client_kwargs, bucket, key)

        try:
            self._client.head_bucket(Bucket=bucket)
        except botocore.client.ClientError as e:
            raise ValueError('the bucket %r does not exist, or is forbidden for access' % bucket) from e

        if writebuffer is None:
            self._buf = io.BytesIO()
        else:
            self._buf = writebuffer

        self._total_bytes = 0

        #
        # This member is part of the io.BufferedIOBase interface.
        #
        self.raw = None

    def flush(self):
        pass

    #
    # Override some methods from io.IOBase.
    #
    def close(self):
        if self._buf is None:
            return

        self._buf.seek(0)

        try:
            self._client.put_object(
                Bucket=self._bucket,
                Key=self._key,
                Body=self._buf,
            )
        except botocore.client.ClientError as e:
            raise ValueError(
                'the bucket %r does not exist, or is forbidden for access' % self._bucket) from e

        logger.debug("%s: direct upload finished", self)
        self._buf = None

    @property
    def closed(self):
        return self._buf is None

    def writable(self):
        """Return True if the stream supports writing."""
        return True

    def seekable(self):
        """If False, seek(), tell() and truncate() will raise IOError.

        We offer only tell support, and no seek or truncate support."""
        return True

    def seek(self, offset, whence=constants.WHENCE_START):
        """Unsupported."""
        raise io.UnsupportedOperation

    def truncate(self, size=None):
        """Unsupported."""
        raise io.UnsupportedOperation

    def tell(self):
        """Return the current stream position."""
        return self._total_bytes

    #
    # io.BufferedIOBase methods.
    #
    def detach(self):
        raise io.UnsupportedOperation("detach() not supported")

    def write(self, b):
        """Write the given buffer (bytes, bytearray, memoryview or any buffer
        interface implementation) into the buffer.  Content of the buffer will
        be written to S3 on close as a single-part upload.

        For more information about buffers, see https://docs.python.org/3/c-api/buffer.html"""
        length = self._buf.write(b)
        self._total_bytes += length
        return length

    def terminate(self):
        """Nothing to cancel in single-part uploads."""
        return

    #
    # Internal methods.
    #
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            self.terminate()
        else:
            self.close()

    def __str__(self):
        return "smart_open.s3.SinglepartWriter(%r, %r)" % (self._bucket, self._key)

    def __repr__(self):
        return "smart_open.s3.SinglepartWriter(bucket=%r, key=%r)" % (self._bucket, self._key)
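

#
# Writing sketch (made-up names).  Everything is buffered locally and written
# to S3 in a single put_object call when close() runs:
#
#     with SinglepartWriter('my-bucket', 'small-key.txt') as fout:
#         fout.write(b'small payload')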


def _retry_if_failed(
        partial,
        attempts=_UPLOAD_ATTEMPTS,
        sleep_seconds=_SLEEP_SECONDS,
        exceptions=None):
    if exceptions is None:
        exceptions = (botocore.exceptions.EndpointConnectionError, )
    for attempt in range(attempts):
        try:
            return partial()
        except exceptions:
            logger.critical(
                'Unable to connect to the endpoint. Check your network connection. '
                'Sleeping and retrying %d more times '
                'before giving up.' % (attempts - attempt - 1)
            )
            time.sleep(sleep_seconds)
    else:
        logger.critical('Unable to connect to the endpoint. Giving up.')
        raise IOError('Unable to connect to the endpoint after %d attempts' % attempts)


def _accept_all(key):
    return True


def iter_bucket(
        bucket_name,
        prefix='',
        accept_key=None,
        key_limit=None,
        workers=16,
        retries=3,
        **session_kwargs):
    """
    Iterate and download all S3 objects under `s3://bucket_name/prefix`.

    Parameters
    ----------
    bucket_name: str
        The name of the bucket.
    prefix: str, optional
        Limits the iteration to keys starting with the prefix.
    accept_key: callable, optional
        This is a function that accepts a key name (unicode string) and
        returns True/False, signalling whether the given key should be downloaded.
        The default behavior is to accept all keys.
    key_limit: int, optional
        If specified, the iterator will stop after yielding this many results.
    workers: int, optional
        The number of subprocesses to use.
    retries: int, optional
        The number of times to retry a failed download.
    session_kwargs: dict, optional
        Keyword arguments to pass when creating a new session.
        For a list of available names and values, see:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html#boto3.session.Session

    Yields
    ------
    str
        The full key name (does not include the bucket name).
    bytes
        The full contents of the key.

    Notes
    -----
    The keys are processed in parallel, using `workers` processes (default: 16),
    to greatly speed up downloads.  If multiprocessing is not available
    (i.e. _MULTIPROCESSING is False), this parameter is ignored.

    Examples
    --------
    >>> # get all JSON files under "mybucket/foo/"
    >>> for key, content in iter_bucket(
    ...         bucket_name, prefix='foo/',
    ...         accept_key=lambda key: key.endswith('.json')):
    ...     print(key, len(content))

    >>> # limit to 10k files, using 32 parallel workers (default is 16)
    >>> for key, content in iter_bucket(bucket_name, key_limit=10000, workers=32):
    ...     print(key, len(content))
    """
    if accept_key is None:
        accept_key = _accept_all

    #
    # If people insist on giving us bucket instances, silently extract the name
    # before moving on.  Works for boto3 as well as boto.
    #
    try:
        bucket_name = bucket_name.name
    except AttributeError:
        pass

    total_size, key_no = 0, -1
    key_iterator = _list_bucket(
        bucket_name,
        prefix=prefix,
        accept_key=accept_key,
        **session_kwargs)
    download_key = functools.partial(
        _download_key,
        bucket_name=bucket_name,
        retries=retries,
        **session_kwargs)

    with smart_open.concurrency.create_pool(processes=workers) as pool:
        result_iterator = pool.imap_unordered(download_key, key_iterator)
        for key_no, (key, content) in enumerate(result_iterator):
            if key_no % 1000 == 0:
                logger.info(
                    "yielding key #%i: %s, size %i (total %.1fMB)",
                    key_no, key, len(content), total_size / 1024.0 ** 2
                )
            yield key, content
            total_size += len(content)

            if key_limit is not None and key_no + 1 >= key_limit:
                # we were asked to output only a limited number of keys => we're done
                break
    logger.info("processed %i keys, total size %i" % (key_no + 1, total_size))


def _list_bucket(
        bucket_name,
        prefix='',
        accept_key=lambda k: True,
        **session_kwargs):
    session = boto3.session.Session(**session_kwargs)
    client = session.client('s3')
    ctoken = None

    while True:
        # list_objects_v2 doesn't like a None value for ContinuationToken,
        # so we don't set it if we don't have one.
        if ctoken:
            kwargs = dict(Bucket=bucket_name, Prefix=prefix, ContinuationToken=ctoken)
        else:
            kwargs = dict(Bucket=bucket_name, Prefix=prefix)
        response = client.list_objects_v2(**kwargs)
        try:
            content = response['Contents']
        except KeyError:
            pass
        else:
            for c in content:
                key = c['Key']
                if accept_key(key):
                    yield key
        ctoken = response.get('NextContinuationToken', None)
        if not ctoken:
            break
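

#
# For illustration (made-up names): _list_bucket follows the continuation
# tokens transparently, so callers see one flat iterator of keys:
#
#     for key in _list_bucket('my-bucket', prefix='logs/',
#                             accept_key=lambda k: k.endswith('.gz')):
#         print(key)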


def _download_key(key_name, bucket_name=None, retries=3, **session_kwargs):
    if bucket_name is None:
        raise ValueError('bucket_name may not be None')

    #
    # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/resources.html#multithreading-or-multiprocessing-with-resources
    #
    session = boto3.session.Session(**session_kwargs)
    s3 = session.resource('s3')
    bucket = s3.Bucket(bucket_name)

    # Sometimes, https://github.com/boto/boto/issues/2409 can happen
    # because of network issues on either side.
    # Retry up to `retries` times to make sure it's not a transient issue.
    for x in range(retries + 1):
        try:
            content_bytes = _download_fileobj(bucket, key_name)
        except botocore.client.ClientError:
            # Actually fail on the last pass through the loop
            if x == retries:
                raise
            # Otherwise, try again, as this might be a transient timeout
            pass
        else:
            return key_name, content_bytes


def _download_fileobj(bucket, key_name):
    #
    # This is a separate function only because it makes it easier to inject
    # exceptions during tests.
    #
    buf = io.BytesIO()
    bucket.download_fileobj(key_name, buf)
    return buf.getvalue()