gfycat.py (1.5 KB)
#!/usr/bin/env python3
import json
import re
from typing import Optional

from bs4 import BeautifulSoup
from praw.models import Submission

from bdfr.exceptions import SiteDownloaderError
from bdfr.resource import Resource
from bdfr.site_authenticator import SiteAuthenticator
from bdfr.site_downloaders.redgifs import Redgifs
  11. class Gfycat(Redgifs):
  12. def __init__(self, post: Submission):
  13. super().__init__(post)
  14. def find_resources(self, authenticator: Optional[SiteAuthenticator] = None) -> list[Resource]:
  15. return super().find_resources(authenticator)
  16. @staticmethod
  17. def _get_link(url: str) -> set[str]:
  18. gfycat_id = re.match(r'.*/(.*?)/?$', url).group(1)
  19. url = 'https://gfycat.com/' + gfycat_id
  20. response = Gfycat.retrieve_url(url)
  21. if re.search(r'(redgifs|gifdeliverynetwork)', response.url):
  22. url = url.lower() # Fixes error with old gfycat/redgifs links
  23. return Redgifs._get_link(url)
  24. soup = BeautifulSoup(response.text, 'html.parser')
  25. content = soup.find('script', attrs={'data-react-helmet': 'true', 'type': 'application/ld+json'})
  26. try:
  27. out = json.loads(content.contents[0])['video']['contentUrl']
  28. except (IndexError, KeyError, AttributeError) as e:
  29. raise SiteDownloaderError(f'Failed to download Gfycat link {url}: {e}')
  30. except json.JSONDecodeError as e:
  31. raise SiteDownloaderError(f'Did not receive valid JSON data: {e}')
  32. return {out,}