Python 3 Start
parent 9f029c8ceb
commit 141fe8d80c
67 changed files with 264 additions and 274 deletions
util/hook.py | 10
@@ -8,7 +8,7 @@ def _hook_add(func, add, name=''):
     func._hook.append(add)
 
     if not hasattr(func, '_filename'):
-        func._filename = func.func_code.co_filename
+        func._filename = func.__code__.co_filename
 
     if not hasattr(func, '_args'):
         argspec = inspect.getargspec(func)
@@ -39,7 +39,7 @@ def _hook_add(func, add, name=''):
 
 
 def sieve(func):
-    if func.func_code.co_argcount != 5:
+    if func.__code__.co_argcount != 5:
         raise ValueError(
             'sieves must take 5 arguments: (bot, input, func, type, args)')
     _hook_add(func, ['sieve', (func,)])
@@ -50,7 +50,7 @@ def command(arg=None, **kwargs):
     args = {}
 
     def command_wrapper(func):
-        args.setdefault('name', func.func_name)
+        args.setdefault('name', func.__name__)
         _hook_add(func, ['command', (func, args)], 'command')
         return func
 
@@ -67,7 +67,7 @@ def event(arg=None, **kwargs):
     args = kwargs
 
     def event_wrapper(func):
-        args['name'] = func.func_name
+        args['name'] = func.__name__
         args.setdefault('events', ['*'])
         _hook_add(func, ['event', (func, args)], 'event')
         return func
@@ -89,7 +89,7 @@ def regex(regex, flags=0, **kwargs):
     args = kwargs
 
     def regex_wrapper(func):
-        args['name'] = func.func_name
+        args['name'] = func.__name__
         args['regex'] = regex
         args['re'] = re.compile(regex, flags)
         _hook_add(func, ['regex', (func, args)], 'regex')
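The changes in util/hook.py all come down to one rename: the Python 2 function attributes func_code and func_name become __code__ and __name__ in Python 3. A minimal sketch of a hook-style decorator using the new names (the decorator below is illustrative, not the repository's actual API):

import inspect


def hook(func):
    # record where the plugin function is defined and which arguments it expects
    func._filename = func.__code__.co_filename
    func._args = inspect.getfullargspec(func).args  # getargspec() was later removed in Python 3.11
    func._name = func.__name__
    return func


@hook
def example(bot, input):
    return input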
util/http.py | 37
@@ -1,18 +1,15 @@
 # convenience wrapper for urllib2 & friends
 
-import cookielib
+import http.cookiejar
 import json
-import urllib
-import urllib2
-import urlparse
-
-from urllib import quote, quote_plus as _quote_plus
+import urllib.request, urllib.parse, urllib.error
+import urllib.request, urllib.error, urllib.parse
+import urllib.parse
+from urllib.parse import quote, quote_plus as _quote_plus
 
 from lxml import etree, html
 from bs4 import BeautifulSoup
 
 # used in plugins that import this
-from urllib2 import URLError, HTTPError
+from urllib.error import URLError, HTTPError
 
 ua_cloudbot = 'Cloudbot/DEV http://github.com/CloudDev/CloudBot'
 
@@ -24,7 +21,7 @@ ua_internetexplorer = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
 ua_chrome = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.4 (KHTML, ' \
             'like Gecko) Chrome/22.0.1229.79 Safari/537.4'
 
-jar = cookielib.CookieJar()
+jar = http.cookiejar.CookieJar()
 
 
 def get(*args, **kwargs):
@@ -63,13 +60,13 @@ def open(url, query_params=None, user_agent=None, post_data=None,
 
     url = prepare_url(url, query_params)
 
-    request = urllib2.Request(url, post_data)
+    request = urllib.request.Request(url, post_data)
 
     if get_method is not None:
         request.get_method = lambda: get_method
 
     if headers is not None:
-        for header_key, header_value in headers.iteritems():
+        for header_key, header_value in headers.items():
             request.add_header(header_key, header_value)
 
     request.add_header('User-Agent', user_agent)
@@ -78,9 +75,9 @@ def open(url, query_params=None, user_agent=None, post_data=None,
     request.add_header('Referer', referer)
 
     if cookies:
-        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
+        opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
     else:
-        opener = urllib2.build_opener()
+        opener = urllib.request.build_opener()
 
     if timeout:
         return opener.open(request, timeout=timeout)
@@ -90,20 +87,20 @@ def open(url, query_params=None, user_agent=None, post_data=None,
 
 def prepare_url(url, queries):
     if queries:
-        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
+        scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
 
-        query = dict(urlparse.parse_qsl(query))
+        query = dict(urllib.parse.parse_qsl(query))
         query.update(queries)
-        query = urllib.urlencode(dict((to_utf8(key), to_utf8(value))
-                                      for key, value in query.iteritems()))
+        query = urllib.parse.urlencode(dict((to_utf8(key), to_utf8(value))
+                                            for key, value in query.items()))
 
-        url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
+        url = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
 
     return url
 
 
 def to_utf8(s):
-    if isinstance(s, unicode):
+    if isinstance(s, str):
         return s.encode('utf8', 'ignore')
     else:
         return str(s)
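The util/http.py hunks map the old urllib2 world onto Python 3's split into urllib.request, urllib.parse, and urllib.error (plus http.cookiejar in place of cookielib). A compact sketch of the same request-with-cookies flow using only those modules (the URL, query, and header values are illustrative):

import http.cookiejar
import urllib.parse
import urllib.request

jar = http.cookiejar.CookieJar()
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))

# build a GET URL with an encoded query string, then issue the request through the opener
url = 'http://example.com/search?' + urllib.parse.urlencode({'q': 'cloudbot'})
request = urllib.request.Request(url, headers={'User-Agent': 'Cloudbot/DEV'})
with opener.open(request, timeout=10) as response:
    body = response.read().decode('utf-8', 'ignore')

Note that headers.iteritems() becomes headers.items(); in Python 3 items() returns a view, which is fine for read-only iteration such as adding request headers.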
(file name not captured in this extract)
@@ -1,5 +1,5 @@
-import http
-import web
+from . import http
+from . import web
 
 
 def eval_py(code, paste_multiline=True):
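The import http / import web lines above are implicit relative imports, which Python 3 drops; from . import http makes the intra-package import explicit and avoids colliding with the standard library's http package. A minimal sketch, assuming a hypothetical module inside the same util package:

# util/example_plugin.py (hypothetical; not part of this commit)
from . import http   # plain 'import http' would now pick up the stdlib http package
from . import web


def shorten_and_fetch(url):
    # shorten with the web helper, then fetch the result with the http helper
    return http.get(web.isgd(url))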
util/text.py | 16
@@ -6,8 +6,8 @@
 
 import re
 
-from HTMLParser import HTMLParser
-import htmlentitydefs
+from html.parser import HTMLParser
+import html.entities
 
 
 class HTMLTextExtractor(HTMLParser):
@@ -19,15 +19,15 @@ class HTMLTextExtractor(HTMLParser):
         self.result.append(d)
 
     def handle_charref(self, number):
-        codepoint = int(number[1:], 16) if number[0] in (u'x', u'X') else int(number)
-        self.result.append(unichr(codepoint))
+        codepoint = int(number[1:], 16) if number[0] in ('x', 'X') else int(number)
+        self.result.append(chr(codepoint))
 
     def handle_entityref(self, name):
-        codepoint = htmlentitydefs.name2codepoint[name]
-        self.result.append(unichr(codepoint))
+        codepoint = html.entities.name2codepoint[name]
+        self.result.append(chr(codepoint))
 
     def get_text(self):
-        return u''.join(self.result)
+        return ''.join(self.result)
 
 
 def strip_html(html):
@@ -39,7 +39,7 @@ def strip_html(html):
 def munge(text, munge_count=0):
     """munges up text."""
     reps = 0
-    for n in xrange(len(text)):
+    for n in range(len(text)):
         rep = character_replacements.get(text[n])
         if rep:
             text = text[:n] + rep.decode('utf8') + text[n + 1:]
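Here the Python 2 names HTMLParser, htmlentitydefs, unichr, xrange, and u'' literals all move to their Python 3 spellings. A short, standalone sketch of the entity-decoding idea using html.entities directly (the helper name is illustrative):

import html.entities


def decode_entity(name):
    # map a named HTML entity such as 'amp' or 'eacute' to its character
    return chr(html.entities.name2codepoint[name])


print(decode_entity('amp'))     # &
print(decode_entity('eacute'))  # é

For whole strings, html.unescape() decodes both named and numeric character references in one call.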
(file name not captured in this extract)
@@ -24,7 +24,7 @@ class TextGenerator(object):
 
         # replace static variables in the template with provided values
         if self.variables:
-            for key, value in self.variables.items():
+            for key, value in list(self.variables.items()):
                 text = text.replace("{%s}" % key, value)
 
         # get a list of all text parts we need
@@ -33,7 +33,7 @@ class TextGenerator(object):
         for required_part in required_parts:
             ppart = self.parts[required_part]
             # check if the part is a single string or a list
-            if not isinstance(ppart, basestring):
+            if not isinstance(ppart, str):
                 part = random.choice(self.parts[required_part])
             else:
                 part = self.parts[required_part]
@@ -43,7 +43,7 @@ class TextGenerator(object):
 
     def generate_strings(self, amount, template=None):
         strings = []
-        for i in xrange(amount):
+        for i in range(amount):
             strings.append(self.generate_string())
         return strings
 
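Two mechanical 2to3 conversions appear in the TextGenerator hunks: basestring collapses to str (Python 3 has no separate unicode type), and dict.items() gets wrapped in list() because it now returns a live view instead of a copy. The wrapping only matters when the dictionary is mutated during iteration; a small illustration with hypothetical data:

parts = {'greeting': 'hello', 'target': 'world'}

# safe: iterate over a snapshot while adding keys to the original dict
for key, value in list(parts.items()):
    if value == 'hello':
        parts['greeting_upper'] = value.upper()

# unsafe in Python 3: resizing the dict while iterating its live view raises RuntimeError
# for key, value in parts.items():
#     parts[key + '_copy'] = value

In the hunk above the dictionary itself is not mutated inside the loop, so the list() call is harmless but not strictly required.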
(file name not captured in this extract)
@@ -72,7 +72,7 @@ def timesince(d, now=None):
     since = delta.days * 24 * 60 * 60 + delta.seconds
     if since <= 0:
         # d is in the future compared to now, stop processing.
-        return u'0 ' + 'minutes'
+        return '0 ' + 'minutes'
     for i, (seconds, name) in enumerate(chunks):
         count = since // seconds
         if count != 0:
(file name not captured in this extract)
@@ -25,8 +25,8 @@ __license__ = "Python"
 
 import re
 import unicodedata
-import urlparse
-from urllib import quote, unquote
+import urllib.parse
+from urllib.parse import quote, unquote
 
 default_port = {
     'http': 80,
@@ -52,7 +52,7 @@ normalizers = (Normalizer(re.compile(
 def normalize(url, assume_scheme=False):
     """Normalize a URL."""
 
-    scheme, auth, path, query, fragment = urlparse.urlsplit(url.strip())
+    scheme, auth, path, query, fragment = urllib.parse.urlsplit(url.strip())
     userinfo, host, port = re.search('([^@]*@)?([^:]*):?(.*)', auth).groups()
 
     # Always provide the URI scheme in lowercase characters.
@@ -78,7 +78,7 @@ def normalize(url, assume_scheme=False):
     # Always use uppercase A-through-F characters when percent-encoding.
    # All portions of the URI must be utf-8 encoded NFC from Unicode strings
     def clean(string):
-        string = unicode(unquote(string), 'utf-8', 'replace')
+        string = str(unquote(string), 'utf-8', 'replace')
         return unicodedata.normalize('NFC', string).encode('utf-8')
 
     path = quote(clean(path), "~:/?#[]@!$&'()*+,;=")
@@ -118,7 +118,7 @@ def normalize(url, assume_scheme=False):
 
     # For schemes that define a port, use an empty port if the default is
     # desired
-    if port and scheme in default_port.keys():
+    if port and scheme in list(default_port.keys()):
         if port.isdigit():
             port = str(int(port))
             if int(port) == default_port[scheme]:
@@ -130,7 +130,7 @@ def normalize(url, assume_scheme=False):
             auth += ":" + port
     if url.endswith("#") and query == "" and fragment == "":
         path += "#"
-    normal_url = urlparse.urlunsplit((scheme, auth, path, query,
+    normal_url = urllib.parse.urlunsplit((scheme, auth, path, query,
                                       fragment)).replace("http:///", "http://")
     for norm in normalizers:
         m = norm.regex.match(normal_url)
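One subtlety in the clean() change: Python 2's unicode(unquote(...), 'utf-8', 'replace') decoded bytes, but in Python 3 urllib.parse.unquote already returns str, so no decode step is needed before NFC normalization. A minimal sketch of that step under this assumption (an illustrative rewrite, not the committed code):

import unicodedata
from urllib.parse import unquote


def clean(string):
    # unquote() returns str in Python 3; normalize to NFC, then re-encode as UTF-8 bytes
    return unicodedata.normalize('NFC', unquote(string)).encode('utf-8')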
util/web.py | 18
@@ -1,16 +1,13 @@
 """ web.py - handy functions for web services """
 
-import http
-import urlnorm
+from . import http
+from . import urlnorm
 import json
-import urllib
-import yql
+import urllib.request, urllib.parse, urllib.error
 
 short_url = "http://is.gd/create.php"
 paste_url = "http://hastebin.com"
-yql_env = "http://datatables.org/alltables.env"
 
-YQL = yql.Public()
 
 
 class ShortenError(Exception):
@@ -25,7 +22,7 @@ class ShortenError(Exception):
 def isgd(url):
     """ shortens a URL with the is.gd API """
     url = urlnorm.normalize(url.encode('utf-8'), assume_scheme='http')
-    params = urllib.urlencode({'format': 'json', 'url': url})
+    params = urllib.parse.urlencode({'format': 'json', 'url': url})
     request = http.get_json("http://is.gd/create.php?%s" % params)
 
     if "errorcode" in request:
@@ -46,9 +43,4 @@ def haste(text, ext='txt'):
     """ pastes text to a hastebin server """
     page = http.get(paste_url + "/documents", post_data=text)
     data = json.loads(page)
-    return ("%s/%s.%s" % (paste_url, data['key'], ext))
-
-
-def query(query, params={}):
-    """ runs a YQL query and returns the results """
-    return YQL.execute(query, params, env=yql_env)
+    return "{}/{}.{}".format(paste_url, data['key'], ext)
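The util/web.py hunks swap %-interpolation for str.format in haste() and route urlencode through urllib.parse in isgd(), while dropping the YQL helper. A small sketch of an equivalent is.gd call in plain Python 3 (the endpoint matches the short_url constant above; the JSON handling is illustrative):

import json
import urllib.parse
import urllib.request

short_url = 'http://is.gd/create.php'


def isgd(url):
    # percent-encode the parameters, then ask is.gd for the short link
    params = urllib.parse.urlencode({'format': 'json', 'url': url})
    with urllib.request.urlopen('{}?{}'.format(short_url, params), timeout=10) as resp:
        data = json.loads(resp.read().decode('utf-8'))
    return data.get('shorturl', data)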