Python 3 Start

Luke Rogers 2014-03-06 11:45:00 +13:00
parent 9f029c8ceb
commit 141fe8d80c
67 changed files with 264 additions and 274 deletions

View file

@@ -193,11 +193,11 @@ def say(inp, conn=None, chan=None):
     the command was used in."""
     inp = inp.split(" ")
     if inp[0][0] == "#":
-        message = u" ".join(inp[1:])
-        out = u"PRIVMSG {} :{}".format(inp[0], message)
+        message = " ".join(inp[1:])
+        out = "PRIVMSG {} :{}".format(inp[0], message)
     else:
-        message = u" ".join(inp[0:])
-        out = u"PRIVMSG {} :{}".format(chan, message)
+        message = " ".join(inp[0:])
+        out = "PRIVMSG {} :{}".format(chan, message)
     conn.send(out)
@@ -213,11 +213,11 @@ def me(inp, conn=None, chan=None):
         for x in inp[1:]:
             message = message + x + " "
         message = message[:-1]
-        out = u"PRIVMSG {} :\x01ACTION {}\x01".format(inp[0], message)
+        out = "PRIVMSG {} :\x01ACTION {}\x01".format(inp[0], message)
     else:
         message = ""
         for x in inp[0:]:
             message = message + x + " "
         message = message[:-1]
-        out = u"PRIVMSG {} :\x01ACTION {}\x01".format(chan, message)
+        out = "PRIVMSG {} :\x01ACTION {}\x01".format(chan, message)
     conn.send(out)
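
Editor's note: the recurring pattern in this commit is dropping the u"" prefix, since every str in Python 3 is Unicode (the prefix was invalid in 3.0-3.2 and only re-allowed in 3.3, so removing it was the safe choice here). As an aside, the concatenate-and-trim loop in me() could be collapsed with str.join; a hypothetical simplification, not part of this commit:

    inp = "#channel hello world".split(" ")           # illustrative input
    message = " ".join(inp[1:])                        # replaces the loop plus message[:-1]
    out = "PRIVMSG {} :\x01ACTION {}\x01".format(inp[0], message)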

View file

@@ -27,7 +27,7 @@ def sieve_suite(bot, input, func, kind, args):
         if _bucket.consume(MESSAGE_COST):
             return input
         else:
-            print "pong!"
+            print("pong!")
         return None
@@ -39,11 +39,11 @@ def sieve_suite(bot, input, func, kind, args):
     acl = conn.config.get('acls', {}).get(func.__name__)
     if acl:
         if 'deny-except' in acl:
-            allowed_channels = map(unicode.lower, acl['deny-except'])
+            allowed_channels = list(map(str.lower, acl['deny-except']))
             if input.chan.lower() not in allowed_channels:
                 return None
        if 'allow-except' in acl:
-            denied_channels = map(unicode.lower, acl['allow-except'])
+            denied_channels = list(map(str.lower, acl['allow-except']))
            if input.chan.lower() in denied_channels:
                return None
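
Editor's note: map() returns a lazy iterator in Python 3, so wrapping it in list() is the mechanical port of Python 2's eager behavior. A comprehension is the more idiomatic spelling, and a set gives O(1) membership tests; a minimal sketch with an illustrative config value:

    acl = {'deny-except': ['#Dev', '#Bots']}                # illustrative, not from the commit
    allowed = list(map(str.lower, acl['deny-except']))      # eager, like Python 2 map
    allowed_set = {c.lower() for c in acl['deny-except']}   # hypothetical alternative
    print('#dev' in allowed, '#dev' in allowed_set)         # True True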

View file

@@ -5,21 +5,21 @@ from util import http, hook
 exchanges = {
     "blockchain": {
         "api_url": "https://blockchain.info/ticker",
-        "func": lambda data: u"Blockchain // Buy: \x0307${:,.2f}\x0f -"
-                             u" Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], data["USD"]["sell"])
+        "func": lambda data: "Blockchain // Buy: \x0307${:,.2f}\x0f -"
+                             " Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], data["USD"]["sell"])
     },
     "coinbase": {
         "api_url": "https://coinbase.com/api/v1/prices/spot_rate",
-        "func": lambda data: u"Coinbase // Current: \x0307${:,.2f}\x0f".format(float(data['amount']))
+        "func": lambda data: "Coinbase // Current: \x0307${:,.2f}\x0f".format(float(data['amount']))
     },
     "bitpay": {
         "api_url": "https://bitpay.com/api/rates",
-        "func": lambda data: u"Bitpay // Current: \x0307${:,.2f}\x0f".format(data[0]['rate'])
+        "func": lambda data: "Bitpay // Current: \x0307${:,.2f}\x0f".format(data[0]['rate'])
     },
     "bitstamp": {
         "api_url": "https://www.bitstamp.net/api/ticker/",
-        "func": lambda data: u"BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f -"
-                             u" Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']),
+        "func": lambda data: "BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f -"
+                             " Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']),
                                                                                      float(data['high']),
                                                                                      float(data['low']),
                                                                                      float(data['volume']))
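
Editor's note: these lambdas rely on adjacent string literals fusing at compile time, so .format() sees the whole two-line template even though it is attached to the second literal; dropping the u prefix from both halves keeps that intact. A minimal sketch with made-up numbers (parentheses here are only for line continuation; the dict braces play that role in the plugin):

    msg = ("Blockchain // Buy: {:,.2f} -"
           " Sell: {:,.2f}".format(1234.5, 1230.0))
    print(msg)  # Blockchain // Buy: 1,234.50 - Sell: 1,230.00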

View file

@@ -5,4 +5,4 @@ from sqlalchemy.orm import scoped_session, sessionmaker
 @hook.command
 def dbtest(inp, db=None):
-    print db
+    print(db)

View file

@@ -18,15 +18,15 @@ split_re = re.compile(r'([\d+-]*)d?(F|\d*)', re.I)
 def n_rolls(count, n):
     """roll an n-sided die count times"""
     if n == "F":
-        return [random.randint(-1, 1) for x in xrange(min(count, 100))]
+        return [random.randint(-1, 1) for x in range(min(count, 100))]
     if n < 2: # it's a coin
         if count < 100:
-            return [random.randint(0, 1) for x in xrange(count)]
+            return [random.randint(0, 1) for x in range(count)]
         else: # fake it
             return [int(random.normalvariate(.5 * count, (.75 * count) ** .5))]
     else:
         if count < 100:
-            return [random.randint(1, n) for x in xrange(count)]
+            return [random.randint(1, n) for x in range(count)]
         else: # fake it
             return [int(random.normalvariate(.5 * (1 + n) * count,
                                              (((n + 1) * (2 * n + 1) / 6. -
@@ -75,7 +75,7 @@ def dice(inp):
     try:
         if count > 0:
             d = n_rolls(count, side)
-            rolls += map(str, d)
+            rolls += list(map(str, d))
             total += sum(d)
         else:
             d = n_rolls(-count, side)
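
Editor's note: range() in Python 3 is lazy, exactly what xrange() was, so these swaps are behavior-preserving. The list() around map() is defensive rather than required here, since list.__iadd__ consumes any iterable; a minimal sketch:

    rolls = []
    rolls += map(str, [3, 1, 4])   # += accepts the bare iterator in Python 3
    print(rolls)                    # ['3', '1', '4']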

View file

@@ -41,7 +41,7 @@ def define(inp):
     for article in sections:
         result += article[0]
         if len(article) > 2:
-            result += u' '.join(u'{}. {}'.format(n + 1, section)
+            result += ' '.join('{}. {}'.format(n + 1, section)
                                 for n, section in enumerate(article[1:]))
         else:
             result += article[1] + ' '

View file

@@ -1,4 +1,4 @@
-import urlparse
+import urllib.parse
 from util import hook, http
@@ -10,7 +10,7 @@ def down(inp):
     if 'http://' not in inp:
         inp = 'http://' + inp
-    inp = 'http://' + urlparse.urlparse(inp).netloc
+    inp = 'http://' + urllib.parse.urlparse(inp).netloc
     # http://mail.python.org/pipermail/python-list/2006-December/589854.html
     try:
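
Editor's note: Python 3 folded urlparse (and urllib, urllib2) into the urllib package, so urlparse.urlparse becomes urllib.parse.urlparse. Importing the name directly also works; a minimal sketch with an illustrative URL:

    from urllib.parse import urlparse
    print(urlparse('http://example.com/path').netloc)  # example.com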

View file

@@ -1,4 +1,4 @@
-from urllib import quote_plus
+from urllib.parse import quote_plus
 from util import hook, http
@@ -44,7 +44,7 @@ def bancount(inp):
     services = request["stats"]["service"]
     out = []
-    for service, ban_count in services.items():
+    for service, ban_count in list(services.items()):
         if ban_count != 0:
             out.append("{}: \x02{}\x02".format(service, ban_count))
         else:

View file

@@ -1,7 +1,7 @@
 import os.path
 import json
 import gzip
-from StringIO import StringIO
+from io import StringIO
 import pygeoip

View file

@@ -1,5 +1,5 @@
 import json
-import urllib2
+import urllib.request, urllib.error, urllib.parse
 from util import hook, http
@@ -43,12 +43,12 @@ def ghissues(inp):
         number = False
     try:
         data = json.loads(http.open(url).read())
-        print url
+        print(url)
         if not number:
             try:
                 data = data[0]
             except IndexError:
-                print data
+                print(data)
                 return "Repo has no open issues"
     except ValueError:
         return "Invalid data returned. Check arguments (.github issues username/repo [number]"
@@ -56,9 +56,9 @@ def ghissues(inp):
     fmt1 = "Issue: #%s (%s) by %s: %s %s" # (number, state, user.login, title, gitio.gitio(data.url))
     number = data["number"]
     if data["state"] == "open":
-        state = u"\x033\x02OPEN\x02\x0f"
+        state = "\x033\x02OPEN\x02\x0f"
     else:
-        state = u"\x034\x02CLOSED\x02\x0f by {}".format(data["closed_by"]["login"])
+        state = "\x034\x02CLOSED\x02\x0f by {}".format(data["closed_by"]["login"])
     user = data["user"]["login"]
     title = data["title"]
     summary = truncate(data["body"])
@@ -93,12 +93,12 @@ def gitio(inp):
     url = 'url=' + str(url)
     if code:
         url = url + '&code=' + str(code)
-    req = urllib2.Request(url='http://git.io', data=url)
+    req = urllib.request.Request(url='http://git.io', data=url)
     # try getting url, catch http error
     try:
-        f = urllib2.urlopen(req)
-    except urllib2.HTTPError:
+        f = urllib.request.urlopen(req)
+    except urllib.error.HTTPError:
         return "Failed to get URL!"
     urlinfo = str(f.info())
@@ -110,7 +110,7 @@ def gitio(inp):
         if row.find("Location") != -1:
             location = row
-    print status
+    print(status)
     if not "201" in status:
         return "Failed to get URL!"

View file

@@ -48,4 +48,4 @@ def google(inp):
     content = http.html.fromstring(content).text_content()
     content = text.truncate_str(content, 150)
-    return u'{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
+    return '{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)

View file

@@ -3,7 +3,7 @@ A Google API key is required and retrieved from the bot config file.
 Since December 1, 2011, the Google Translate API is a paid service only.
 """
-import htmlentitydefs
+import html.entities
 import re
 from util import hook, http
@@ -22,15 +22,15 @@ def unescape(text):
         # character reference
         try:
             if text[:3] == "&#x":
-                return unichr(int(text[3:-1], 16))
+                return chr(int(text[3:-1], 16))
             else:
-                return unichr(int(text[2:-1]))
+                return chr(int(text[2:-1]))
         except ValueError:
             pass
     else:
         # named entity
         try:
-            text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
+            text = chr(html.entities.name2codepoint[text[1:-1]])
         except KeyError:
             pass
     return text # leave as is
@@ -83,7 +83,7 @@ def translate(inp, bot=None):
     if not api_key:
         return "This command requires a paid API key."
-    args = inp.split(u' ', 2)
+    args = inp.split(' ', 2)
     try:
         if len(args) >= 2:
@@ -100,7 +100,7 @@ def translate(inp, bot=None):
                 return goog_trans(api_key, args[1] + ' ' + args[2], sl, 'en')
             return goog_trans(api_key, args[2], sl, tl)
         return goog_trans(api_key, inp, '', 'en')
-    except IOError, e:
+    except IOError as e:
         return e
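
Editor's note: chr() in Python 3 covers the full Unicode range, so it replaces unichr() without the old narrow-build caveats, and htmlentitydefs lives on as html.entities. Worth noting that html.unescape(), added in Python 3.4 (released the same month as this commit), could replace the whole hand-rolled unescape() helper; a minimal sketch:

    import html  # stdlib, Python 3.4+
    print(html.unescape("&amp; &#x2603; &copy;"))  # & ☃ ©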

View file

@@ -1,5 +1,5 @@
 from util import hook
-from urllib import unquote
+from urllib.parse import unquote
 @hook.command(autohelp=False)
 def googleurl(inp, db=None, nick=None):

View file

@@ -10,7 +10,7 @@ def help_command(inp, notice=None, conn=None, bot=None):
     funcs = {}
     disabled = bot.config.get('disabled_plugins', [])
     disabled_comm = bot.config.get('disabled_commands', [])
-    for command, (func, args) in bot.commands.iteritems():
+    for command, (func, args) in bot.commands.items():
         fn = re.match(r'^plugins.(.+).py$', func._filename)
         if fn.group(1).lower() not in disabled:
             if command not in disabled_comm:
@@ -21,7 +21,7 @@ def help_command(inp, notice=None, conn=None, bot=None):
             else:
                 funcs[func] = command
-    commands = dict((value, key) for key, value in funcs.iteritems())
+    commands = dict((value, key) for key, value in funcs.items())
     if not inp:
         out = [""]

View file

@@ -46,7 +46,7 @@ def horoscope(inp, db=None, notice=None, nick=None):
     title = soup.find_all('h1', {'class': 'h1b'})[1]
     horoscope_text = soup.find('div', {'class': 'fontdef1'})
-    result = u"\x02%s\x02 %s" % (title, horoscope_text)
+    result = "\x02%s\x02 %s" % (title, horoscope_text)
     result = text.strip_html(result)
     #result = unicode(result, "utf8").replace('flight ','')

View file

@@ -1,4 +1,4 @@
-from urllib import urlencode
+from urllib.parse import urlencode
 import re
 from util import hook, http, timeformat

View file

@@ -1,4 +1,4 @@
-import urlparse
+import urllib.parse
 from util import hook, http, urlnorm
@@ -8,14 +8,15 @@ def isup(inp):
     """isup -- uses isup.me to see if a site is up or not"""
     # slightly overcomplicated, esoteric URL parsing
-    scheme, auth, path, query, fragment = urlparse.urlsplit(inp.strip())
+    scheme, auth, path, query, fragment = urllib.parse.urlsplit(inp.strip())
     domain = auth.encode('utf-8') or path.encode('utf-8')
     url = urlnorm.normalize(domain, assume_scheme="http")
     try:
         soup = http.get_soup('http://isup.me/' + domain)
-    except http.HTTPError, http.URLError:
+    except http.HTTPError as xxx_todo_changeme:
+        http.URLError = xxx_todo_changeme
         return "Could not get status."
     content = soup.find('div').text.strip()
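
Editor's note: the xxx_todo_changeme lines are 2to3's literal translation of `except http.HTTPError, http.URLError:`. In Python 2 that syntax never caught URLError; it bound the caught HTTPError to the name http.URLError, and 2to3 faithfully reproduces that (broken) behavior. What the code almost certainly intended is the tuple form; a suggested follow-up, not part of this commit:

    try:
        soup = http.get_soup('http://isup.me/' + domain)
    except (http.HTTPError, http.URLError):
        return "Could not get status."

A separate latent issue: domain is bytes after .encode('utf-8'), and concatenating bytes onto the str 'http://isup.me/' raises TypeError in Python 3, so this plugin likely needs a second pass.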

View file

@@ -6,7 +6,7 @@ from util import hook, web, http
 def lmgtfy(inp):
     """lmgtfy [phrase] - Posts a google link for the specified phrase"""
-    link = u"http://lmgtfy.com/?q={}".format(http.quote_plus(inp))
+    link = "http://lmgtfy.com/?q={}".format(http.quote_plus(inp))
     try:
         return web.isgd(link)

View file

@@ -111,6 +111,6 @@ def log(paraml, input=None, bot=None):
     fd = get_log_fd(bot.data_dir, input.server, input.chan)
     fd.write(timestamp + ' ' + beau + '\n')
-    out = "{} {} {}".format(timestamp, input.chan, beau.encode('utf8', 'ignore'))
+    out = "{} {} {}".format(timestamp, input.chan, beau)
     bot.logger.debug(out)

View file

@@ -1,7 +1,7 @@
 # metacritic.com scraper
 import re
-from urllib2 import HTTPError
+from urllib.error import HTTPError
 from util import hook, http

View file

@@ -56,7 +56,7 @@ def plugin_random():
     while not results:
         plugin_number = random.randint(1, count_total)
-        print "trying {}".format(plugin_number)
+        print("trying {}".format(plugin_number))
         try:
             results = http.get_json(random_url.format(plugin_number))
         except (http.HTTPError, http.URLError) as e:
@@ -84,7 +84,7 @@ def format_output(data):
     description = text.truncate_str(data['description'], 30)
     url = data['website']
     authors = data['authors'][0]
-    authors = authors[0] + u"\u200b" + authors[1:]
+    authors = authors[0] + "\u200b" + authors[1:]
     stage = data['stage']
     current_version = data['versions'][0]
@@ -97,11 +97,11 @@ def format_output(data):
     link = web.try_isgd(current_version['link'])
     if description:
-        line_a = u"\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(name, authors, description, stage, url)
+        line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(name, authors, description, stage, url)
     else:
-        line_a = u"\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(name, authors, stage, url)
+        line_a = "\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(name, authors, stage, url)
-    line_b = u"Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(version_number, bukkit_versions,
+    line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(version_number, bukkit_versions,
                                                                                  last_update, link)
     return line_a, line_b

View file

@@ -13,12 +13,12 @@ except ImportError:
     has_dns = False
-mc_colors = [(u'\xa7f', u'\x0300'), (u'\xa70', u'\x0301'), (u'\xa71', u'\x0302'), (u'\xa72', u'\x0303'),
-             (u'\xa7c', u'\x0304'), (u'\xa74', u'\x0305'), (u'\xa75', u'\x0306'), (u'\xa76', u'\x0307'),
-             (u'\xa7e', u'\x0308'), (u'\xa7a', u'\x0309'), (u'\xa73', u'\x0310'), (u'\xa7b', u'\x0311'),
-             (u'\xa71', u'\x0312'), (u'\xa7d', u'\x0313'), (u'\xa78', u'\x0314'), (u'\xa77', u'\x0315'),
-             (u'\xa7l', u'\x02'), (u'\xa79', u'\x0310'), (u'\xa7o', u'\t'), (u'\xa7m', u'\x13'),
-             (u'\xa7r', u'\x0f'), (u'\xa7n', u'\x15')]
+mc_colors = [('\xa7f', '\x0300'), ('\xa70', '\x0301'), ('\xa71', '\x0302'), ('\xa72', '\x0303'),
+             ('\xa7c', '\x0304'), ('\xa74', '\x0305'), ('\xa75', '\x0306'), ('\xa76', '\x0307'),
+             ('\xa7e', '\x0308'), ('\xa7a', '\x0309'), ('\xa73', '\x0310'), ('\xa7b', '\x0311'),
+             ('\xa71', '\x0312'), ('\xa7d', '\x0313'), ('\xa78', '\x0314'), ('\xa77', '\x0315'),
+             ('\xa7l', '\x02'), ('\xa79', '\x0310'), ('\xa7o', '\t'), ('\xa7m', '\x13'),
+             ('\xa7r', '\x0f'), ('\xa7n', '\x15')]
 ## EXCEPTIONS
@@ -98,9 +98,9 @@ def mcping_modern(host, port):
     try:
         version = data["version"]["name"]
         try:
-            desc = u" ".join(data["description"]["text"].split())
+            desc = " ".join(data["description"]["text"].split())
         except TypeError:
-            desc = u" ".join(data["description"].split())
+            desc = " ".join(data["description"].split())
         max_players = data["players"]["max"]
         online = data["players"]["online"]
     except Exception as e:
@@ -136,10 +136,10 @@ def mcping_legacy(host, port):
     length = struct.unpack('!h', sock.recv(2))[0]
     values = sock.recv(length * 2).decode('utf-16be')
-    data = values.split(u'\x00') # try to decode data using new format
+    data = values.split('\x00') # try to decode data using new format
     if len(data) == 1:
         # failed to decode data, server is using old format
-        data = values.split(u'\xa7')
+        data = values.split('\xa7')
     output = {
         "motd": format_colors(" ".join(data[0].split())),
         "motd_raw": data[0],
@@ -199,17 +199,17 @@ def parse_input(inp):
 def format_colors(motd):
     for original, replacement in mc_colors:
         motd = motd.replace(original, replacement)
-    motd = motd.replace(u"\xa7k", "")
+    motd = motd.replace("\xa7k", "")
     return motd
 def format_output(data):
     if data["version"]:
-        return u"{motd}\x0f - {version}\x0f - {players}/{players_max}" \
-               u" players.".format(**data).replace("\n", u"\x0f - ")
+        return "{motd}\x0f - {version}\x0f - {players}/{players_max}" \
+               " players.".format(**data).replace("\n", "\x0f - ")
     else:
-        return u"{motd}\x0f - {players}/{players_max}" \
-               u" players.".format(**data).replace("\n", u"\x0f - ")
+        return "{motd}\x0f - {players}/{players_max}" \
+               " players.".format(**data).replace("\n", "\x0f - ")
 @hook.command

View file

@@ -21,7 +21,7 @@ def mcstatus(inp):
     green = []
     yellow = []
     red = []
-    for server, status in data.items():
+    for server, status in list(data.items()):
         if status == "green":
             green.append(server)
         elif status == "yellow":

View file

@@ -87,15 +87,15 @@ def mcuser(inp):
         profile["lt"] = ", legacy" if profile["legacy"] else ""
         if profile["paid"]:
-            return u"The account \x02{name}\x02 ({id}{lt}) exists. It is a \x02paid\x02" \
-                   u" account.".format(**profile)
+            return "The account \x02{name}\x02 ({id}{lt}) exists. It is a \x02paid\x02" \
+                   " account.".format(**profile)
         else:
-            return u"The account \x02{name}\x02 ({id}{lt}) exists. It \x034\x02is NOT\x02\x0f a paid" \
-                   u" account.".format(**profile)
+            return "The account \x02{name}\x02 ({id}{lt}) exists. It \x034\x02is NOT\x02\x0f a paid" \
+                   " account.".format(**profile)
     elif name_status == "free":
-        return u"The account \x02{}\x02 does not exist.".format(user)
+        return "The account \x02{}\x02 does not exist.".format(user)
     elif name_status == "invalid":
-        return u"The name \x02{}\x02 contains invalid characters.".format(user)
+        return "The name \x02{}\x02 contains invalid characters.".format(user)
     else:
         # if you see this, panic
         return "Unknown Error."

View file

@@ -45,7 +45,7 @@ def mcwiki(inp):
             summary = " ".join(p.text_content().splitlines())
             summary = re.sub("\[\d+\]", "", summary)
             summary = text.truncate_str(summary, 200)
-            return u"{} :: {}".format(summary, url)
+            return "{} :: {}".format(summary, url)
     # this shouldn't happen
     return "Unknown Error."

View file

@@ -46,18 +46,18 @@ def format_item(item, show_url=True):
         tags.append("\x02Featured\x02")
     if item["IsShellShockerItem"]:
-        tags.append(u"\x02SHELL SHOCKER\u00AE\x02")
+        tags.append("\x02SHELL SHOCKER\u00AE\x02")
     # join all the tags together in a comma separated string ("tag1, tag2, tag3")
-    tag_text = u", ".join(tags)
+    tag_text = ", ".join(tags)
     if show_url:
         # create the item URL and shorten it
         url = web.try_isgd(ITEM_URL.format(item["NeweggItemNumber"]))
-        return u"\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating,
+        return "\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating,
                                                         tag_text, url)
     else:
-        return u"\x02{}\x02 ({}) - {} - {}".format(title, price, rating,
+        return "\x02{}\x02 ({}) - {} - {}".format(title, price, rating,
                                                    tag_text)

View file

@@ -15,7 +15,7 @@ def test(s):
 def newgrounds_url(match):
     location = match.group(4).split("/")[-1]
     if not test(location):
-        print "Not a valid Newgrounds portal ID. Example: http://www.newgrounds.com/portal/view/593993"
+        print("Not a valid Newgrounds portal ID. Example: http://www.newgrounds.com/portal/view/593993")
         return None
     soup = http.get_soup("http://www.newgrounds.com/portal/view/" + location)
@@ -31,7 +31,7 @@ def newgrounds_url(match):
     # get rating
     try:
         rating_info = soup.find('dd', {'class': 'star-variable'})['title'].split("Stars &ndash;")[0].strip()
-        rating = u" - rated \x02{}\x02/\x025.0\x02".format(rating_info)
+        rating = " - rated \x02{}\x02/\x025.0\x02".format(rating_info)
     except:
         rating = ""

View file

@@ -29,7 +29,7 @@ def password(inp, notice=None):
     # add numbers
     if "numeric" in inp or "number" in inp:
-        okay = okay + [str(x) for x in xrange(0, 10)]
+        okay = okay + [str(x) for x in range(0, 10)]
     # add symbols
     if "symbol" in inp:

View file

@@ -37,4 +37,4 @@ def pre(inp):
     return '{} - {}{} - {} ({} ago)'.format(section, name, size, date_string, since)
-print pre("top gear")
+print(pre("top gear"))

View file

@@ -1,4 +1,4 @@
-import urllib
+import urllib.request, urllib.parse, urllib.error
 import json
 import re
@@ -11,7 +11,7 @@ def getdata(inp, types, api_key, api_secret):
     consumer = oauth.Consumer(api_key, api_secret)
     client = oauth.Client(consumer)
     response = client.request('http://api.rdio.com/1/', 'POST',
-                              urllib.urlencode({'method': 'search', 'query': inp, 'types': types, 'count': '1'}))
+                              urllib.parse.urlencode({'method': 'search', 'query': inp, 'types': types, 'count': '1'}))
     data = json.loads(response[1])
     return data
@@ -34,16 +34,16 @@ def rdio(inp, bot=None):
         artist = info['artist']
         album = info['album']
         url = info['shortUrl']
-        return u"\x02{}\x02 by \x02{}\x02 - {} {}".format(name, artist, album, url)
+        return "\x02{}\x02 by \x02{}\x02 - {} {}".format(name, artist, album, url)
     elif 'artist' in info and not 'album' in info: # Album
         name = info['name']
         artist = info['artist']
         url = info['shortUrl']
-        return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
+        return "\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
     else: # Artist
         name = info['name']
         url = info['shortUrl']
-        return u"\x02{}\x02 - {}".format(name, url)
+        return "\x02{}\x02 - {}".format(name, url)
 @hook.command
@@ -62,7 +62,7 @@ def rdiot(inp, bot=None):
     artist = info['artist']
     album = info['album']
     url = info['shortUrl']
-    return u"\x02{}\x02 by \x02{}\x02 - {} - {}".format(name, artist, album, url)
+    return "\x02{}\x02 by \x02{}\x02 - {} - {}".format(name, artist, album, url)
 @hook.command
@@ -79,7 +79,7 @@ def rdioar(inp, bot=None):
         return "No results."
     name = info['name']
     url = info['shortUrl']
-    return u"\x02{}\x02 - {}".format(name, url)
+    return "\x02{}\x02 - {}".format(name, url)
 @hook.command
@@ -97,7 +97,7 @@ def rdioal(inp, bot=None):
     name = info['name']
     artist = info['artist']
     url = info['shortUrl']
-    return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
+    return "\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
 rdio_re = (r'(.*:)//(rd.io|www.rdio.com|rdio.com)(:[0-9]+)?(.*)', re.I)
@@ -113,7 +113,7 @@ def rdio_url(match, bot=None):
     consumer = oauth.Consumer(api_key, api_secret)
     client = oauth.Client(consumer)
     response = client.request('http://api.rdio.com/1/', 'POST',
-                              urllib.urlencode({'method': 'getObjectFromUrl', 'url': url}))
+                              urllib.parse.urlencode({'method': 'getObjectFromUrl', 'url': url}))
     data = json.loads(response[1])
     info = data['result']
     if 'name' in info:
@@ -121,11 +121,11 @@ def rdio_url(match, bot=None):
         name = info['name']
         artist = info['artist']
         album = info['album']
-        return u"Rdio track: \x02{}\x02 by \x02{}\x02 - {}".format(name, artist, album)
+        return "Rdio track: \x02{}\x02 by \x02{}\x02 - {}".format(name, artist, album)
     elif 'artist' in info and not 'album' in info: # Album
         name = info['name']
         artist = info['artist']
-        return u"Rdio album: \x02{}\x02 by \x02{}\x02".format(name, artist)
+        return "Rdio album: \x02{}\x02 by \x02{}\x02".format(name, artist)
     else: # Artist
         name = info['name']
-        return u"Rdio artist: \x02{}\x02".format(name)
+        return "Rdio artist: \x02{}\x02".format(name)

View file

@@ -22,7 +22,7 @@ def reddit_url(match):
     timeago = thread.xpath("//div[@id='siteTable']//p[@class='tagline']/time/text()")[0]
     comments = thread.xpath("//div[@id='siteTable']//a[@class='comments']/text()")[0]
-    return u'\x02{}\x02 - posted by \x02{}\x02 {} ago - {} upvotes, {} downvotes - {}'.format(
+    return '\x02{}\x02 - posted by \x02{}\x02 {} ago - {} upvotes, {} downvotes - {}'.format(
         title, author, timeago, upvotes, downvotes, comments)
@@ -74,6 +74,6 @@ def reddit(inp):
     else:
         item["warning"] = ""
-    return u"\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
+    return "\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
            " {timesince} ago - {ups} upvotes, {downs} downvotes -" \
            " {link}{warning}".format(**item)

View file

@@ -35,5 +35,5 @@ def rottentomatoes(inp, bot=None):
     fresh = critics_score * review_count / 100
     rotten = review_count - fresh
-    return u"{} - Critics Rating: \x02{}%\x02 ({} liked, {} disliked) " \
+    return "{} - Critics Rating: \x02{}%\x02 ({} liked, {} disliked) " \
            "Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh, rotten, audience_score, url)

View file

@@ -31,7 +31,7 @@ def rss(inp, message=None):
             link = web.isgd(row["link"])
         except (web.ShortenError, http.HTTPError, http.URLError):
             link = row["link"]
-        message(u"{} - {}".format(title, link))
+        message("{} - {}".format(title, link))
 @hook.command(autohelp=False)

View file

@@ -1,4 +1,4 @@
-from urllib import urlencode
+from urllib.parse import urlencode
 import re
 from util import hook, http, web, text
@@ -13,17 +13,17 @@ def soundcloud(url, api_key):
     data = http.get_json(api_url + '/resolve.json?' + urlencode({'url': url, 'client_id': api_key}))
     if data['description']:
-        desc = u": {} ".format(text.truncate_str(data['description'], 50))
+        desc = ": {} ".format(text.truncate_str(data['description'], 50))
     else:
         desc = ""
     if data['genre']:
-        genre = u"- Genre: \x02{}\x02 ".format(data['genre'])
+        genre = "- Genre: \x02{}\x02 ".format(data['genre'])
     else:
         genre = ""
     url = web.try_isgd(data['permalink_url'])
-    return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
+    return "SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
         data['title'], data['user']['username'], desc, genre, data['playback_count'], data['download_count'],
         data['comment_count'], url)
@@ -32,7 +32,7 @@ def soundcloud(url, api_key):
 def soundcloud_url(match, bot=None):
     api_key = bot.config.get("api_keys", {}).get("soundcloud")
     if not api_key:
-        print "Error: no api key set"
+        print("Error: no api key set")
         return None
     url = match.group(1).split(' ')[-1] + "//" + (match.group(2) if match.group(2) else "") + match.group(3) + \
           match.group(4).split(' ')[0]
@@ -43,7 +43,7 @@ def sndsc_url(match, bot=None):
     api_key = bot.config.get("api_keys", {}).get("soundcloud")
     if not api_key:
-        print "Error: no api key set"
+        print("Error: no api key set")
         return None
     url = match.group(1).split(' ')[-1] + "//" + (match.group(2) if match.group(2) else "") + match.group(3) + \
           match.group(4).split(' ')[0]

View file

@@ -1,5 +1,5 @@
 import re
-from urllib import urlencode
+from urllib.parse import urlencode
 from util import hook, http, web
@@ -45,7 +45,7 @@ def spotify(inp):
     except IndexError:
         return "Could not find track."
     url = sptfy(gateway.format(type, id))
-    return u"\x02{}\x02 by \x02{}\x02 - \x02{}\x02".format(data["tracks"][0]["name"],
+    return "\x02{}\x02 by \x02{}\x02 - \x02{}\x02".format(data["tracks"][0]["name"],
                                                            data["tracks"][0]["artists"][0]["name"], url)
@@ -62,7 +62,7 @@ def spalbum(inp):
     except IndexError:
         return "Could not find album."
     url = sptfy(gateway.format(type, id))
-    return u"\x02{}\x02 by \x02{}\x02 - \x02{}\x02".format(data["albums"][0]["name"],
+    return "\x02{}\x02 by \x02{}\x02 - \x02{}\x02".format(data["albums"][0]["name"],
                                                            data["albums"][0]["artists"][0]["name"], url)
@@ -79,7 +79,7 @@ def spartist(inp):
     except IndexError:
         return "Could not find artist."
     url = sptfy(gateway.format(type, id))
-    return u"\x02{}\x02 - \x02{}\x02".format(data["artists"][0]["name"], url)
+    return "\x02{}\x02 - \x02{}\x02".format(data["artists"][0]["name"], url)
 @hook.regex(*http_re)
@@ -94,13 +94,13 @@ def spotify_url(match):
         name = data["track"]["name"]
         artist = data["track"]["artists"][0]["name"]
         album = data["track"]["album"]["name"]
-        return u"Spotify Track: \x02{}\x02 by \x02{}\x02 from the album \x02{}\x02 - \x02{}\x02".format(name, artist,
+        return "Spotify Track: \x02{}\x02 by \x02{}\x02 from the album \x02{}\x02 - \x02{}\x02".format(name, artist,
                                                                                                         album, sptfy(
                                                                                                         gateway.format(type, spotify_id)))
     elif type == "artist":
-        return u"Spotify Artist: \x02{}\x02 - \x02{}\x02".format(data["artist"]["name"],
+        return "Spotify Artist: \x02{}\x02 - \x02{}\x02".format(data["artist"]["name"],
                                                                  sptfy(gateway.format(type, spotify_id)))
     elif type == "album":
-        return u"Spotify Album: \x02{}\x02 - \x02{}\x02 - \x02{}\x02".format(data["album"]["artist"],
+        return "Spotify Album: \x02{}\x02 - \x02{}\x02 - \x02{}\x02".format(data["album"]["artist"],
                                                                              data["album"]["name"],
                                                                              sptfy(gateway.format(type, spotify_id)))

View file

@@ -57,8 +57,8 @@ def get_steam_info(url):
     data["price"] = soup.find('div', {'class': 'game_purchase_price price'}).text.strip()
-    return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
-           u" \x02Price\x02: {price}".format(**data)
+    return "\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
+           " \x02Price\x02: {price}".format(**data)
 @hook.regex(*steam_re)

View file

@@ -1,5 +1,5 @@
 import csv
-import StringIO
+import io
 from util import hook, http, text
@@ -29,7 +29,7 @@ def is_number(s):
 def unicode_dictreader(utf8_data, **kwargs):
     csv_reader = csv.DictReader(utf8_data, **kwargs)
     for row in csv_reader:
-        yield dict([(key.lower(), unicode(value, 'utf-8')) for key, value in row.iteritems()])
+        yield dict([(key.lower(), str(value, 'utf-8')) for key, value in row.items()])
@@ -66,7 +66,7 @@ def steamcalc(inp, reply=None):
     except (http.HTTPError, http.URLError):
         return "Could not get data for this user."
-    csv_data = StringIO.StringIO(request) # we use StringIO because CSV can't read a string
+    csv_data = io.StringIO(request) # we use StringIO because CSV can't read a string
     reader = unicode_dictreader(csv_data)
     # put the games in a list
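
Editor's note: the mechanical unicode-to-str swap in unicode_dictreader() leaves a runtime bug. In Python 3 csv.DictReader over an io.StringIO already yields str values, and str(value, 'utf-8') on a str raises "TypeError: decoding str is not supported". A hypothetical follow-up, not part of this commit:

    def unicode_dictreader(utf8_data, **kwargs):
        for row in csv.DictReader(utf8_data, **kwargs):
            # values are already str in Python 3; no decode step needed
            yield {key.lower(): value for key, value in row.items()}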

View file

@@ -22,9 +22,9 @@ def stock(inp):
         quote['color'] = "3"
     quote['PercentChange'] = 100 * change / (price - change)
-    print quote
+    print(quote)
-    return u"\x02{Name}\x02 (\x02{symbol}\x02) - {LastTradePriceOnly} " \
+    return "\x02{Name}\x02 (\x02{symbol}\x02) - {LastTradePriceOnly} " \
            "\x03{color}{Change} ({PercentChange:.2f}%)\x03 " \
            "Day Range: {DaysRange} " \
            "MCAP: {MarketCapitalization}".format(**quote)

View file

@@ -18,7 +18,7 @@ def suggest(inp):
     if not suggestions:
         return 'no suggestions found'
-    out = u", ".join(suggestions)
+    out = ", ".join(suggestions)
     # defuckify text
     soup = BeautifulSoup(out)

View file

@@ -20,4 +20,4 @@ def title(inp):
     if not page_title:
         return "Could not find title."
-    return u"{} [{}]".format(page_title, real_url)
+    return "{} [{}]".format(page_title, real_url)

View file

@@ -44,7 +44,7 @@ def get_episode_info(episode, api_key):
     first_aired = episode.findtext("FirstAired")
     try:
-        air_date = datetime.date(*map(int, first_aired.split('-')))
+        air_date = datetime.date(*list(map(int, first_aired.split('-'))))
     except (ValueError, TypeError):
         return None

View file

@@ -1,5 +1,5 @@
 import re
-from HTMLParser import HTMLParser
+from html.parser import HTMLParser
 from util import hook, http
@@ -36,7 +36,7 @@ def multitwitch_url(match):
     out = ""
     for i in usernames:
         if not test(i):
-            print "Not a valid username"
+            print("Not a valid username")
             return None
         if out == "":
             out = twitch_lookup(i)
@@ -50,7 +50,7 @@ def twitch_url(match):
     bit = match.group(4).split("#")[0]
     location = "/".join(bit.split("/")[1:])
     if not test(location):
-        print "Not a valid username"
+        print("Not a valid username")
         return None
     return twitch_lookup(location)
@@ -100,9 +100,9 @@ def twitch_lookup(location):
         title = data['title']
         playing = data['meta_game']
         viewers = "\x033\x02Online now!\x02\x0f " + str(data["channel_count"]) + " viewer"
-        print viewers
+        print(viewers)
         viewers = viewers + "s" if not " 1 view" in viewers else viewers
-        print viewers
+        print(viewers)
         return h.unescape(fmt.format(title, channel, playing, viewers))
     else:
         try:

View file

@@ -42,13 +42,13 @@ def twitter_url(match, bot=None):
     text = " ".join(tweet.text.split())
     if user.verified:
-        prefix = u"\u2713"
+        prefix = "\u2713"
     else:
         prefix = ""
     time = timesince.timesince(tweet.created_at, datetime.utcnow())
-    return u"{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
+    return "{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
 @hook.command("tw")
@@ -71,7 +71,7 @@ def twitter(inp, bot=None):
             if e[0][0]['code'] == 34:
                 return "Could not find tweet."
             else:
-                return u"Error {}: {}".format(e[0][0]['code'], e[0][0]['message'])
+                return "Error {}: {}".format(e[0][0]['code'], e[0][0]['message'])
         user = tweet.user
@@ -95,21 +95,21 @@ def twitter(inp, bot=None):
             if e[0][0]['code'] == 34:
                 return "Could not find user."
             else:
-                return u"Error {}: {}".format(e[0][0]['code'], e[0][0]['message'])
+                return "Error {}: {}".format(e[0][0]['code'], e[0][0]['message'])
         # get the users tweets
         user_timeline = api.user_timeline(id=user.id, count=tweet_number + 1)
         # if the timeline is empty, return an error
         if not user_timeline:
-            return u"The user \x02{}\x02 has no tweets.".format(user.screen_name)
+            return "The user \x02{}\x02 has no tweets.".format(user.screen_name)
         # grab the newest tweet from the users timeline
         try:
             tweet = user_timeline[tweet_number]
         except IndexError:
             tweet_count = len(user_timeline)
-            return u"The user \x02{}\x02 only has \x02{}\x02 tweets.".format(user.screen_name, tweet_count)
+            return "The user \x02{}\x02 only has \x02{}\x02 tweets.".format(user.screen_name, tweet_count)
     elif re.match(r'^#\w+$', inp):
         # user is searching by hashtag
@@ -127,13 +127,13 @@ def twitter(inp, bot=None):
     text = " ".join(tweet.text.split())
     if user.verified:
-        prefix = u"\u2713"
+        prefix = "\u2713"
     else:
         prefix = ""
     time = timesince.timesince(tweet.created_at, datetime.utcnow())
-    return u"{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
+    return "{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
 @hook.command("twinfo")
@@ -155,20 +155,20 @@ def twuser(inp, bot=None):
         return "Unknown error"
     if user.verified:
-        prefix = u"\u2713"
+        prefix = "\u2713"
     else:
         prefix = ""
     if user.location:
-        loc_str = u" is located in \x02{}\x02 and".format(user.location)
+        loc_str = " is located in \x02{}\x02 and".format(user.location)
     else:
         loc_str = ""
     if user.description:
-        desc_str = u" The users description is \"{}\"".format(user.description)
+        desc_str = " The users description is \"{}\"".format(user.description)
     else:
         desc_str = ""
-    return u"{}@\x02{}\x02 ({}){} has \x02{:,}\x02 tweets and \x02{:,}\x02 followers.{}" \
+    return "{}@\x02{}\x02 ({}){} has \x02{:,}\x02 tweets and \x02{:,}\x02 followers.{}" \
            "".format(prefix, user.screen_name, user.name, loc_str, user.statuses_count, user.followers_count,
                      desc_str)

View file

@@ -41,7 +41,7 @@ def urban(inp):
         url = definitions[id_num - 1]['permalink']
-    output = u"[%i/%i] %s :: %s" % \
+    output = "[%i/%i] %s :: %s" % \
              (id_num, len(definitions), definition, url)
     return output

View file

@@ -161,9 +161,9 @@ def munge(inp):
 @hook.command
 def rainbow(inp):
-    inp = unicode(inp)
+    inp = str(inp)
     inp = strip(inp)
-    col = colors.items()
+    col = list(colors.items())
     out = ""
     l = len(colors)
     for i, t in enumerate(inp):
@@ -176,8 +176,8 @@ def rainbow(inp):
 @hook.command
 def wrainbow(inp):
-    inp = unicode(inp)
-    col = colors.items()
+    inp = str(inp)
+    col = list(colors.items())
     inp = strip(inp).split(' ')
     out = []
     l = len(colors)
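
Editor's note: the list() around colors.items() matters here because these functions index into col later; a Python 3 dict view supports iteration but not subscripting. A minimal sketch with an illustrative color table:

    colors = {'red': '\x0304', 'green': '\x0303'}   # illustrative subset
    col = list(colors.items())
    print(col[0])   # ('red', '\x0304') — indexing needs the list()
    # colors.items()[0] raises TypeError: 'dict_items' object is not subscriptable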

View file

@@ -1,5 +1,5 @@
 import json
-import urllib2
+import urllib.request, urllib.error, urllib.parse
 from util import hook, http, web
@@ -8,14 +8,14 @@ def get_sound_info(game, search):
     search = search.replace(" ", "+")
     try:
         data = http.get_json("http://p2sounds.blha303.com.au/search/%s/%s?format=json" % (game, search))
-    except urllib2.HTTPError as e:
+    except urllib.error.HTTPError as e:
         return "Error: " + json.loads(e.read())["error"]
     items = []
     for item in data["items"]:
         if "music" in game:
             textsplit = item["text"].split('"')
             text = ""
-            for i in xrange(len(textsplit)):
+            for i in range(len(textsplit)):
                 if i % 2 != 0 and i < 6:
                     if text:
                         text += " / " + textsplit[i]

View file

@@ -36,9 +36,9 @@ def wolframalpha(inp, bot=None):
             if subpod:
                 results.append(subpod)
             if results:
-                pod_texts.append(title + u': ' + u', '.join(results))
+                pod_texts.append(title + ': ' + ', '.join(results))
-    ret = u' - '.join(pod_texts)
+    ret = ' - '.join(pod_texts)
     if not pod_texts:
         return 'No results.'
@@ -46,7 +46,7 @@ def wolframalpha(inp, bot=None):
     ret = re.sub(r'\\(.)', r'\1', ret)
     def unicode_sub(match):
-        return unichr(int(match.group(1), 16))
+        return chr(int(match.group(1), 16))
     ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)
@@ -55,4 +55,4 @@ def wolframalpha(inp, bot=None):
     if not ret:
         return 'No results.'
-    return u"{} - {}".format(ret, short_url)
+    return "{} - {}".format(ret, short_url)

View file

@@ -25,7 +25,7 @@ def xkcd_search(term):
     if result:
         url = result.find('div', {'class': 'tinylink'}).text
         xkcd_id = url[:-1].split("/")[-1]
-        print xkcd_id
+        print(xkcd_id)
         return xkcd_info(xkcd_id, url=True)
     else:
         return "No results found!"

View file

@@ -13,4 +13,4 @@ def answer(inp):
     # we split the answer and .join() it to remove newlines/extra spaces
     answer_text = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
-    return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url)
+    return '\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url)

View file

@@ -25,13 +25,13 @@ def get_video_description(video_id):
     data = request['data']
-    out = u'\x02{}\x02'.format(data['title'])
+    out = '\x02{}\x02'.format(data['title'])
     if not data.get('duration'):
         return out
     length = data['duration']
-    out += u' - length \x02{}\x02'.format(timeformat.format_time(length, simple=True))
+    out += ' - length \x02{}\x02'.format(timeformat.format_time(length, simple=True))
     if 'ratingCount' in data:
         # format
@@ -39,12 +39,12 @@ def get_video_description(video_id):
         dislikes = plural(data['ratingCount'] - int(data['likeCount']), "dislike")
         percent = 100 * float(data['likeCount']) / float(data['ratingCount'])
-        out += u' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
+        out += ' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
                                                      dislikes, percent)
     if 'viewCount' in data:
         views = data['viewCount']
-        out += u' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
+        out += ' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
     try:
         uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"][
@@ -53,11 +53,11 @@ def get_video_description(video_id):
         uploader = data["uploader"]
     upload_time = time.strptime(data['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z")
-    out += u' - \x02{}\x02 on \x02{}\x02'.format(uploader,
+    out += ' - \x02{}\x02 on \x02{}\x02'.format(uploader,
                                                  time.strftime("%Y.%m.%d", upload_time))
     if 'contentRating' in data:
-        out += u' - \x034NSFW\x02'
+        out += ' - \x034NSFW\x02'
     return out
@@ -83,7 +83,7 @@ def youtube(inp):
     video_id = request['data']['items'][0]['id']
-    return get_video_description(video_id) + u" - " + video_url % video_id
+    return get_video_description(video_id) + " - " + video_url % video_id
 @hook.command('ytime')
@@ -115,8 +115,8 @@ def youtime(inp):
     length_text = timeformat.format_time(length, simple=True)
     total_text = timeformat.format_time(total, accuracy=8)
-    return u'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
-           u'a total run time of {}!'.format(data['title'], length_text, views,
+    return 'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
+           'a total run time of {}!'.format(data['title'], length_text, views,
                                              total_text)
@@ -134,4 +134,4 @@ def ytplaylist_url(match):
     author = soup.find('img', {'class': 'channel-header-profile-image'})['title']
     num_videos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
     views = soup.find('ul', {'class': 'header-stats'}).findAll('li')[1].text.split(' ')[0]
-    return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, num_videos, author)
+    return "\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, num_videos, author)