main.py

# -*- coding: utf-8 -*-
import re

import requests
from flask import Flask, Response, redirect, request
from requests.exceptions import (
    ChunkedEncodingError,
    ContentDecodingError, ConnectionError, StreamConsumedError)
from requests.utils import (
    stream_decode_response_unicode, iter_slices, CaseInsensitiveDict)
from urllib3.exceptions import (
    DecodeError, ReadTimeoutError, ProtocolError)

# config
# Toggle for serving branch/blob files via the jsDelivr mirror; 0 = off (default: off)
jsdelivr = 0
size_limit = 1024 * 1024 * 1024 * 999  # maximum allowed file size; the 999 GB default is effectively unlimited, see https://github.com/hunshcn/gh-proxy/issues/8

"""
The white list is applied first, then the black list; URLs matched by pass_list
are 302-redirected straight to jsDelivr, ignoring the jsdelivr setting.
Order of evaluation: white -> black -> pass. See
https://github.com/hunshcn/gh-proxy/issues/41 for examples.
One rule per line. A rule can ban all repositories of a user or a specific
repository. The examples below use the black list; the white list works the same way.
user1         # ban all repositories of user1
user1/repo1   # ban user1's repo1
*/repo1       # ban every repository named repo1
"""
white_list = '''
'''
black_list = '''
'''
pass_list = '''
'''

HOST = '127.0.0.1'  # listen address; binding to localhost and fronting it with a reverse-proxying web server is recommended
PORT = 80  # listen port
ASSET_URL = 'https://hunshcn.github.io/gh-proxy'  # home page

white_list = [tuple([x.replace(' ', '') for x in i.split('/')]) for i in white_list.split('\n') if i]
black_list = [tuple([x.replace(' ', '') for x in i.split('/')]) for i in black_list.split('\n') if i]
pass_list = [tuple([x.replace(' ', '') for x in i.split('/')]) for i in pass_list.split('\n') if i]
app = Flask(__name__)
CHUNK_SIZE = 1024 * 10
index_html = requests.get(ASSET_URL, timeout=10).text
icon_r = requests.get(ASSET_URL + '/favicon.ico', timeout=10).content
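# URL patterns the proxy accepts:
#   exp1 - github.com release and archive downloads
#   exp2 - github.com blob/raw file pages
#   exp3 - github.com git smart-HTTP paths (info/refs, git-upload-pack, ...) used by `git clone`
#   exp4 - raw.githubusercontent.com / raw.github.com file URLs
#   exp5 - gist.githubusercontent.com / gist.github.com file URLs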
exp1 = re.compile(r'^(?:https?://)?github\.com/(?P<author>.+?)/(?P<repo>.+?)/(?:releases|archive)/.*$')
exp2 = re.compile(r'^(?:https?://)?github\.com/(?P<author>.+?)/(?P<repo>.+?)/(?:blob|raw)/.*$')
exp3 = re.compile(r'^(?:https?://)?github\.com/(?P<author>.+?)/(?P<repo>.+?)/(?:info|git-).*$')
exp4 = re.compile(r'^(?:https?://)?raw\.(?:githubusercontent|github)\.com/(?P<author>.+?)/(?P<repo>.+?)/.+?/.+$')
exp5 = re.compile(r'^(?:https?://)?gist\.(?:githubusercontent|github)\.com/(?P<author>.+?)/.+?/.+$')

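# Make new requests sessions start with an empty header set instead of requests'
# defaults (User-Agent, Accept-Encoding, ...), so only the headers forwarded from
# the client are sent upstream.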
requests.sessions.default_headers = lambda: CaseInsensitiveDict()


@app.route('/')
def index():
    if 'q' in request.args:
        return redirect('/' + request.args.get('q'))
    return index_html


@app.route('/favicon.ico')
def icon():
    return Response(icon_r, content_type='image/vnd.microsoft.icon')
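

# requests' own Response.iter_content decodes gzip/deflate bodies, which would no
# longer match the Content-Encoding/Content-Length headers relayed to the client.
# This override streams the raw, undecoded bytes instead.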
def iter_content(self, chunk_size=1, decode_unicode=False):
    """Rewrite of requests' iter_content that streams with decode_content=False."""

    def generate():
        # Special case for urllib3.
        if hasattr(self.raw, 'stream'):
            try:
                for chunk in self.raw.stream(chunk_size, decode_content=False):
                    yield chunk
            except ProtocolError as e:
                raise ChunkedEncodingError(e)
            except DecodeError as e:
                raise ContentDecodingError(e)
            except ReadTimeoutError as e:
                raise ConnectionError(e)
        else:
            # Standard file-like object.
            while True:
                chunk = self.raw.read(chunk_size)
                if not chunk:
                    break
                yield chunk

        self._content_consumed = True

    if self._content_consumed and isinstance(self._content, bool):
        raise StreamConsumedError()
    elif chunk_size is not None and not isinstance(chunk_size, int):
        raise TypeError("chunk_size must be an int, it is instead a %s." % type(chunk_size))
    # simulate reading small chunks of the content
    reused_chunks = iter_slices(self._content, chunk_size)

    stream_chunks = generate()

    chunks = reused_chunks if self._content_consumed else stream_chunks

    if decode_unicode:
        chunks = stream_decode_response_unicode(chunks, self)

    return chunks


def check_url(u):
    for exp in (exp1, exp2, exp3, exp4, exp5):
        m = exp.match(u)
        if m:
            return m
    return False
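

# Main entry point: every request path is treated as a GitHub URL. It is validated
# against the patterns above, filtered through the white/black/pass lists, optionally
# rewritten to jsDelivr, and otherwise streamed through proxy().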
@app.route('/<path:u>', methods=['GET', 'POST'])
def handler(u):
    u = u if u.startswith('http') else 'https://' + u
    if u.rfind('://', 3, 9) == -1:
        u = u.replace('s:/', 's://', 1)  # uWSGI collapses // in the path to /
    pass_by = False
    m = check_url(u)
    if m:
        m = tuple(m.groups())
        if white_list:
            for i in white_list:
                if m[:len(i)] == i or i[0] == '*' and len(m) == 2 and m[1] == i[1]:
                    break
            else:
                return Response('Forbidden by white list.', status=403)
        for i in black_list:
            if m[:len(i)] == i or i[0] == '*' and len(m) == 2 and m[1] == i[1]:
                return Response('Forbidden by black list.', status=403)
        for i in pass_list:
            if m[:len(i)] == i or i[0] == '*' and len(m) == 2 and m[1] == i[1]:
                pass_by = True
                break
    else:
        return Response('Invalid input.', status=403)

    if (jsdelivr or pass_by) and exp2.match(u):
        u = u.replace('/blob/', '@', 1).replace('github.com', 'cdn.jsdelivr.net/gh', 1)
        return redirect(u)
    elif (jsdelivr or pass_by) and exp4.match(u):
        u = re.sub(r'(\.com/.*?/.+?)/(.+?/)', r'\1@\2', u, 1)
        _u = u.replace('raw.githubusercontent.com', 'cdn.jsdelivr.net/gh', 1)
        u = u.replace('raw.github.com', 'cdn.jsdelivr.net/gh', 1) if _u == u else _u
        return redirect(u)
    else:
        if exp2.match(u):
            u = u.replace('/blob/', '/raw/', 1)
        if pass_by:
            url = u + request.url.replace(request.base_url, '', 1)
            if url.startswith('https:/') and not url.startswith('https://'):
                url = 'https://' + url[7:]
            return redirect(url)
        return proxy(u)
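

# Forward the request upstream with the client's headers and body, stream the
# response back in CHUNK_SIZE pieces, redirect the client directly to upstream
# when the body exceeds size_limit, and rewrite GitHub Location headers so
# redirects keep going through the proxy.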
def proxy(u, allow_redirects=False):
    headers = {}
    r_headers = dict(request.headers)
    if 'Host' in r_headers:
        r_headers.pop('Host')
    try:
        url = u + request.url.replace(request.base_url, '', 1)
        if url.startswith('https:/') and not url.startswith('https://'):
            url = 'https://' + url[7:]
        r = requests.request(method=request.method, url=url, data=request.data, headers=r_headers, stream=True, allow_redirects=allow_redirects)
        headers = dict(r.headers)
        if 'Content-length' in r.headers and int(r.headers['Content-length']) > size_limit:
            return redirect(u + request.url.replace(request.base_url, '', 1))

        def generate():
            for chunk in iter_content(r, chunk_size=CHUNK_SIZE):
                yield chunk

        if 'Location' in r.headers:
            _location = r.headers.get('Location')
            if check_url(_location):
                headers['Location'] = '/' + _location
            else:
                return proxy(_location, True)

        return Response(generate(), headers=headers, status=r.status_code)
    except Exception as e:
        headers['content-type'] = 'text/html; charset=UTF-8'
        return Response('server error ' + str(e), status=500, headers=headers)


app.debug = True
if __name__ == '__main__':
    app.run(host=HOST, port=PORT)