Python 3 Start

This commit is contained in:
Luke Rogers 2014-03-06 11:45:00 +13:00
parent 9f029c8ceb
commit 141fe8d80c
67 changed files with 264 additions and 274 deletions

View File

@ -8,7 +8,7 @@ import signal
# check python version
if sys.version_info < (2, 7, 0):
print "CloudBot requires Python 2.7 or newer."
print("CloudBot requires Python 2.7 or newer.")
sys.exit(1)
# set up environment
@ -18,7 +18,7 @@ os.chdir(sys.path[0] or '.') # do stuff relative to the install directory
if os.path.exists(os.path.abspath('lib')):
sys.path += ['lib']
print 'CloudBot2 <http://git.io/cloudbotirc>'
print('CloudBot2 <http://git.io/cloudbotirc>')
def exit_gracefully(signum, frame):
# this doesn't really work that well

View File

@ -2,7 +2,7 @@ import time
import logging
import re
import os
import Queue
import queue
import collections
import threading
@ -60,11 +60,11 @@ class Bot(threading.Thread):
# start IRC connections
self.connect()
print self.connections
print(self.connections)
for conn in self.connections:
conn.permissions = PermissionManager(self, conn)
print conn
print(conn)
# run plugin loader
self.plugins = collections.defaultdict(list)
@ -82,11 +82,11 @@ class Bot(threading.Thread):
try:
incoming = conn.parsed_queue.get_nowait()
if incoming == StopIteration:
print "StopIteration"
print("StopIteration")
# IRC engine has signalled timeout, so reconnect (ugly)
conn.connection.reconnect()
main.main(self, conn, incoming)
except Queue.Empty:
except queue.Empty:
pass
# if no messages are in the incoming queue, sleep

View File

@ -26,9 +26,9 @@ class Config(dict):
if not os.path.exists(self.path):
# if there is no config, show an error and die
self.logger.critical("No config file found, bot shutting down!")
print "No config file found! Bot shutting down in five seconds."
print "Copy 'config.default' to 'config.json' for defaults."
print "For help, see http://git.io/cloudbotirc. Thank you for using CloudBot!"
print("No config file found! Bot shutting down in five seconds.")
print("Copy 'config.default' to 'config.json' for defaults.")
print("For help, see http://git.io/cloudbotirc. Thank you for using CloudBot!")
time.sleep(5)
sys.exit()

View File

@ -1,6 +1,6 @@
import os
import sqlite3
import thread
import _thread
threaddbs = {}
@ -11,7 +11,7 @@ def get_db_connection(conn, name=''):
if not name:
name = '{}.db'.format(conn.name)
threadid = thread.get_ident()
threadid = _thread.get_ident()
if name in threaddbs and threadid in threaddbs[name]:
return threaddbs[name][threadid]
filename = os.path.join(bot.data_dir, name)

View File

@ -2,7 +2,7 @@ import re
import socket
import time
import threading
import Queue
import queue
from core import permissions
@ -30,7 +30,7 @@ def censor(text):
class ReceiveThread(threading.Thread):
"""receives messages from IRC and puts them in the input_queue"""
def __init__(self, sock, input_queue, timeout):
self.input_buffer = ""
self.input_buffer = b""
self.input_queue = input_queue
self.socket = sock
self.timeout = timeout
@ -70,8 +70,9 @@ class ReceiveThread(threading.Thread):
return
continue
while '\r\n' in self.input_buffer:
line, self.input_buffer = self.input_buffer.split('\r\n', 1)
while b'\r\n' in self.input_buffer:
line, self.input_buffer = self.input_buffer.split(b'\r\n', 1)
print(decode(line))
self.input_queue.put(decode(line))
@ -95,7 +96,7 @@ class SSLReceiveThread(ReceiveThread):
class SendThread(threading.Thread):
"""sends messages from output_queue to IRC"""
def __init__(self, sock, conn_name, output_queue):
self.output_buffer = ""
self.output_buffer = b""
self.output_queue = output_queue
self.conn_name = conn_name
self.socket = sock
@ -106,7 +107,7 @@ class SendThread(threading.Thread):
def run(self):
while not self.shutdown:
line = self.output_queue.get().splitlines()[0][:500]
self.output_buffer += line.encode('utf-8', 'replace') + '\r\n'
self.output_buffer += line.encode('utf-8', 'replace') + b'\r\n'
while self.output_buffer:
sent = self.socket.send(self.output_buffer)
self.output_buffer = self.output_buffer[sent:]
@ -215,13 +216,13 @@ class IRC(object):
self.vars = {}
self.history = {}
self.parsed_queue = Queue.Queue() # responses from the server are placed here
self.parsed_queue = queue.Queue() # responses from the server are placed here
# format: [rawline, prefix, command, params,
# nick, user, host, paramlist, msg]
self.parsed_queue = Queue.Queue()
self.input_queue = Queue.Queue()
self.output_queue = Queue.Queue()
self.parsed_queue = queue.Queue()
self.input_queue = queue.Queue()
self.output_queue = queue.Queue()
# create the IRC connection and connect
self.connection = self.create_connection()
@ -270,19 +271,19 @@ class IRC(object):
def ctcp(self, target, ctcp_type, text):
""" makes the bot send a PRIVMSG CTCP to a target """
out = u"\x01{} {}\x01".format(ctcp_type, text)
out = "\x01{} {}\x01".format(ctcp_type, text)
self.cmd("PRIVMSG", [target, out])
def cmd(self, command, params=None):
if params:
params[-1] = u':' + params[-1]
self.send(u"{} {}".format(command, ' '.join(params)))
params[-1] = ':' + params[-1]
self.send("{} {}".format(command, ' '.join(params)))
else:
self.send(command)
def send(self, string):
try:
self.logger.info(u"{} >> {}".format(self.name.upper(), string))
self.logger.info("{} >> {}".format(self.name.upper(), string))
except:
# if this doesn't work, no big deal
pass

View File

@ -10,7 +10,7 @@ from core import main
def make_signature(f):
return f.func_code.co_filename, f.func_name, f.func_code.co_firstlineno
return f.__code__.co_filename, f.__name__, f.__code__.co_firstlineno
def format_plug(plug, kind='', lpad=0):
@ -71,19 +71,19 @@ class PluginLoader(object):
return
# remove plugins already loaded from this file
for name, data in self.bot.plugins.iteritems():
for name, data in self.bot.plugins.items():
self.bot.plugins[name] = [x for x in data
if x[0]._filename != filename]
# stop all currently running instances of the plugins from this file
for func, handler in list(self.bot.threads.iteritems()):
for func, handler in list(self.bot.threads.items()):
if func._filename == filename:
handler.stop()
del self.bot.threads[func]
# find objects with hooks in the plugin namespace
# TODO: kill it with fire, kill it all
for obj in namespace.itervalues():
for obj in namespace.values():
if hasattr(obj, '_hook'): # check for magic
if obj._thread:
self.bot.threads[obj] = main.Handler(self.bot, obj)
@ -104,11 +104,11 @@ class PluginLoader(object):
self.bot.logger.info("Unloading plugins from: {}".format(filename))
# remove plugins loaded from this file
for plugin_type, plugins in self.bot.plugins.iteritems():
for plugin_type, plugins in self.bot.plugins.items():
self.bot.plugins[plugin_type] = [x for x in plugins if x[0]._filename != filename]
# stop all currently running instances of the plugins from this file
for func, handler in list(self.bot.threads.iteritems()):
for func, handler in list(self.bot.threads.items()):
if func._filename == filename:
handler.stop()
del self.bot.threads[func]

View File

@ -1,11 +1,11 @@
import thread
import _thread
import traceback
import Queue
import queue
import re
from sqlalchemy.orm import scoped_session
thread.stack_size(1024 * 512) # reduce vm size
_thread.stack_size(1024 * 512) # reduce vm size
class Input(dict):
@ -25,7 +25,7 @@ class Input(dict):
if target == nick:
conn.msg(target, message)
else:
conn.msg(target, u"({}) {}".format(nick, message))
conn.msg(target, "({}) {}".format(nick, message))
def action(message, target=chan):
"""sends an action to the current channel/user or a specific channel/user"""
@ -76,7 +76,7 @@ def run(bot, func, input):
return
finally:
if uses_db:
print "Close"
print("Close")
input.db.close()
else:
kw = dict((key, input[key]) for key in args if key in input)
@ -96,7 +96,7 @@ def run(bot, func, input):
bot.logger.exception("Error in plugin {}:".format(func._filename))
return
if out is not None:
input.reply(unicode(out))
input.reply(str(out))
def do_sieve(sieve, bot, input, func, type, args):
@ -113,8 +113,8 @@ class Handler(object):
def __init__(self, bot, func):
self.func = func
self.bot = bot
self.input_queue = Queue.Queue()
thread.start_new_thread(self.start, ())
self.input_queue = queue.Queue()
_thread.start_new_thread(self.start, ())
def start(self):
uses_db = 'db' in self.func._args
@ -157,14 +157,14 @@ def dispatch(bot, input, kind, func, args, autohelp=False):
if func._thread:
bot.threads[func].put(input)
else:
thread.start_new_thread(run, (bot, func, input))
_thread.start_new_thread(run, (bot, func, input))
def match_command(bot, command):
commands = list(bot.commands)
# do some fuzzy matching
prefix = filter(lambda x: x.startswith(command), commands)
prefix = [x for x in commands if x.startswith(command)]
if len(prefix) == 1:
return prefix[0]
elif prefix and command not in prefix:

View File

@ -24,7 +24,7 @@ class PermissionManager(object):
self.logger.info("Reloading permissions for {}.".format(self.conn.name))
groups = self.conn.config.get("permissions", [])
# work out the permissions and users each group has
for key, value in groups.iteritems():
for key, value in groups.items():
self.group_perms[key] = []
self.group_users[key] = []
for permission in value["perms"]:
@ -32,7 +32,7 @@ class PermissionManager(object):
for user in value["users"]:
self.group_users[key].append(user)
for group, users in self.group_users.iteritems():
for group, users in self.group_users.items():
group_perms = self.group_perms[group]
for perm in group_perms:
self.perm_users[perm] = []

View File

@ -193,11 +193,11 @@ def say(inp, conn=None, chan=None):
the command was used in."""
inp = inp.split(" ")
if inp[0][0] == "#":
message = u" ".join(inp[1:])
out = u"PRIVMSG {} :{}".format(inp[0], message)
message = " ".join(inp[1:])
out = "PRIVMSG {} :{}".format(inp[0], message)
else:
message = u" ".join(inp[0:])
out = u"PRIVMSG {} :{}".format(chan, message)
message = " ".join(inp[0:])
out = "PRIVMSG {} :{}".format(chan, message)
conn.send(out)
@ -213,11 +213,11 @@ def me(inp, conn=None, chan=None):
for x in inp[1:]:
message = message + x + " "
message = message[:-1]
out = u"PRIVMSG {} :\x01ACTION {}\x01".format(inp[0], message)
out = "PRIVMSG {} :\x01ACTION {}\x01".format(inp[0], message)
else:
message = ""
for x in inp[0:]:
message = message + x + " "
message = message[:-1]
out = u"PRIVMSG {} :\x01ACTION {}\x01".format(chan, message)
out = "PRIVMSG {} :\x01ACTION {}\x01".format(chan, message)
conn.send(out)

View File

@ -27,7 +27,7 @@ def sieve_suite(bot, input, func, kind, args):
if _bucket.consume(MESSAGE_COST):
return input
else:
print "pong!"
print("pong!")
return None
@ -39,11 +39,11 @@ def sieve_suite(bot, input, func, kind, args):
acl = conn.config.get('acls', {}).get(func.__name__)
if acl:
if 'deny-except' in acl:
allowed_channels = map(unicode.lower, acl['deny-except'])
allowed_channels = list(map(str.lower, acl['deny-except']))
if input.chan.lower() not in allowed_channels:
return None
if 'allow-except' in acl:
denied_channels = map(unicode.lower, acl['allow-except'])
denied_channels = list(map(str.lower, acl['allow-except']))
if input.chan.lower() in denied_channels:
return None

View File

@ -5,21 +5,21 @@ from util import http, hook
exchanges = {
"blockchain": {
"api_url": "https://blockchain.info/ticker",
"func": lambda data: u"Blockchain // Buy: \x0307${:,.2f}\x0f -"
u" Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], data["USD"]["sell"])
"func": lambda data: "Blockchain // Buy: \x0307${:,.2f}\x0f -"
" Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], data["USD"]["sell"])
},
"coinbase": {
"api_url": "https://coinbase.com/api/v1/prices/spot_rate",
"func": lambda data: u"Coinbase // Current: \x0307${:,.2f}\x0f".format(float(data['amount']))
"func": lambda data: "Coinbase // Current: \x0307${:,.2f}\x0f".format(float(data['amount']))
},
"bitpay": {
"api_url": "https://bitpay.com/api/rates",
"func": lambda data: u"Bitpay // Current: \x0307${:,.2f}\x0f".format(data[0]['rate'])
"func": lambda data: "Bitpay // Current: \x0307${:,.2f}\x0f".format(data[0]['rate'])
},
"bitstamp": {
"api_url": "https://www.bitstamp.net/api/ticker/",
"func": lambda data: u"BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f -"
u" Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']),
"func": lambda data: "BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f -"
" Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']),
float(data['high']),
float(data['low']),
float(data['volume']))

View File

@ -5,4 +5,4 @@ from sqlalchemy.orm import scoped_session, sessionmaker
@hook.command
def dbtest(inp, db=None):
print db
print(db)

View File

@ -18,15 +18,15 @@ split_re = re.compile(r'([\d+-]*)d?(F|\d*)', re.I)
def n_rolls(count, n):
"""roll an n-sided die count times"""
if n == "F":
return [random.randint(-1, 1) for x in xrange(min(count, 100))]
return [random.randint(-1, 1) for x in range(min(count, 100))]
if n < 2: # it's a coin
if count < 100:
return [random.randint(0, 1) for x in xrange(count)]
return [random.randint(0, 1) for x in range(count)]
else: # fake it
return [int(random.normalvariate(.5 * count, (.75 * count) ** .5))]
else:
if count < 100:
return [random.randint(1, n) for x in xrange(count)]
return [random.randint(1, n) for x in range(count)]
else: # fake it
return [int(random.normalvariate(.5 * (1 + n) * count,
(((n + 1) * (2 * n + 1) / 6. -
@ -75,7 +75,7 @@ def dice(inp):
try:
if count > 0:
d = n_rolls(count, side)
rolls += map(str, d)
rolls += list(map(str, d))
total += sum(d)
else:
d = n_rolls(-count, side)

View File

@ -41,7 +41,7 @@ def define(inp):
for article in sections:
result += article[0]
if len(article) > 2:
result += u' '.join(u'{}. {}'.format(n + 1, section)
result += ' '.join('{}. {}'.format(n + 1, section)
for n, section in enumerate(article[1:]))
else:
result += article[1] + ' '

View File

@ -1,4 +1,4 @@
import urlparse
import urllib.parse
from util import hook, http
@ -10,7 +10,7 @@ def down(inp):
if 'http://' not in inp:
inp = 'http://' + inp
inp = 'http://' + urlparse.urlparse(inp).netloc
inp = 'http://' + urllib.parse.urlparse(inp).netloc
# http://mail.python.org/pipermail/python-list/2006-December/589854.html
try:

View File

@ -1,4 +1,4 @@
from urllib import quote_plus
from urllib.parse import quote_plus
from util import hook, http
@ -44,7 +44,7 @@ def bancount(inp):
services = request["stats"]["service"]
out = []
for service, ban_count in services.items():
for service, ban_count in list(services.items()):
if ban_count != 0:
out.append("{}: \x02{}\x02".format(service, ban_count))
else:

View File

@ -1,7 +1,7 @@
import os.path
import json
import gzip
from StringIO import StringIO
from io import StringIO
import pygeoip

View File

@ -1,5 +1,5 @@
import json
import urllib2
import urllib.request, urllib.error, urllib.parse
from util import hook, http
@ -43,12 +43,12 @@ def ghissues(inp):
number = False
try:
data = json.loads(http.open(url).read())
print url
print(url)
if not number:
try:
data = data[0]
except IndexError:
print data
print(data)
return "Repo has no open issues"
except ValueError:
return "Invalid data returned. Check arguments (.github issues username/repo [number]"
@ -56,9 +56,9 @@ def ghissues(inp):
fmt1 = "Issue: #%s (%s) by %s: %s %s" # (number, state, user.login, title, gitio.gitio(data.url))
number = data["number"]
if data["state"] == "open":
state = u"\x033\x02OPEN\x02\x0f"
state = "\x033\x02OPEN\x02\x0f"
else:
state = u"\x034\x02CLOSED\x02\x0f by {}".format(data["closed_by"]["login"])
state = "\x034\x02CLOSED\x02\x0f by {}".format(data["closed_by"]["login"])
user = data["user"]["login"]
title = data["title"]
summary = truncate(data["body"])
@ -93,12 +93,12 @@ def gitio(inp):
url = 'url=' + str(url)
if code:
url = url + '&code=' + str(code)
req = urllib2.Request(url='http://git.io', data=url)
req = urllib.request.Request(url='http://git.io', data=url)
# try getting url, catch http error
try:
f = urllib2.urlopen(req)
except urllib2.HTTPError:
f = urllib.request.urlopen(req)
except urllib.error.HTTPError:
return "Failed to get URL!"
urlinfo = str(f.info())
@ -110,7 +110,7 @@ def gitio(inp):
if row.find("Location") != -1:
location = row
print status
print(status)
if not "201" in status:
return "Failed to get URL!"

View File

@ -48,4 +48,4 @@ def google(inp):
content = http.html.fromstring(content).text_content()
content = text.truncate_str(content, 150)
return u'{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)
return '{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content)

View File

@ -3,7 +3,7 @@ A Google API key is required and retrieved from the bot config file.
Since December 1, 2011, the Google Translate API is a paid service only.
"""
import htmlentitydefs
import html.entities
import re
from util import hook, http
@ -22,15 +22,15 @@ def unescape(text):
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
return chr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
return chr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
text = chr(html.entities.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
@ -83,7 +83,7 @@ def translate(inp, bot=None):
if not api_key:
return "This command requires a paid API key."
args = inp.split(u' ', 2)
args = inp.split(' ', 2)
try:
if len(args) >= 2:
@ -100,7 +100,7 @@ def translate(inp, bot=None):
return goog_trans(api_key, args[1] + ' ' + args[2], sl, 'en')
return goog_trans(api_key, args[2], sl, tl)
return goog_trans(api_key, inp, '', 'en')
except IOError, e:
except IOError as e:
return e

View File

@ -1,5 +1,5 @@
from util import hook
from urllib import unquote
from urllib.parse import unquote
@hook.command(autohelp=False)
def googleurl(inp, db=None, nick=None):

View File

@ -10,7 +10,7 @@ def help_command(inp, notice=None, conn=None, bot=None):
funcs = {}
disabled = bot.config.get('disabled_plugins', [])
disabled_comm = bot.config.get('disabled_commands', [])
for command, (func, args) in bot.commands.iteritems():
for command, (func, args) in bot.commands.items():
fn = re.match(r'^plugins.(.+).py$', func._filename)
if fn.group(1).lower() not in disabled:
if command not in disabled_comm:
@ -21,7 +21,7 @@ def help_command(inp, notice=None, conn=None, bot=None):
else:
funcs[func] = command
commands = dict((value, key) for key, value in funcs.iteritems())
commands = dict((value, key) for key, value in funcs.items())
if not inp:
out = [""]

View File

@ -46,7 +46,7 @@ def horoscope(inp, db=None, notice=None, nick=None):
title = soup.find_all('h1', {'class': 'h1b'})[1]
horoscope_text = soup.find('div', {'class': 'fontdef1'})
result = u"\x02%s\x02 %s" % (title, horoscope_text)
result = "\x02%s\x02 %s" % (title, horoscope_text)
result = text.strip_html(result)
#result = unicode(result, "utf8").replace('flight ','')

View File

@ -1,4 +1,4 @@
from urllib import urlencode
from urllib.parse import urlencode
import re
from util import hook, http, timeformat

View File

@ -1,4 +1,4 @@
import urlparse
import urllib.parse
from util import hook, http, urlnorm
@ -8,14 +8,15 @@ def isup(inp):
"""isup -- uses isup.me to see if a site is up or not"""
# slightly overcomplicated, esoteric URL parsing
scheme, auth, path, query, fragment = urlparse.urlsplit(inp.strip())
scheme, auth, path, query, fragment = urllib.parse.urlsplit(inp.strip())
domain = auth.encode('utf-8') or path.encode('utf-8')
url = urlnorm.normalize(domain, assume_scheme="http")
try:
soup = http.get_soup('http://isup.me/' + domain)
except http.HTTPError, http.URLError:
except http.HTTPError as xxx_todo_changeme:
http.URLError = xxx_todo_changeme
return "Could not get status."
content = soup.find('div').text.strip()

View File

@ -6,7 +6,7 @@ from util import hook, web, http
def lmgtfy(inp):
"""lmgtfy [phrase] - Posts a google link for the specified phrase"""
link = u"http://lmgtfy.com/?q={}".format(http.quote_plus(inp))
link = "http://lmgtfy.com/?q={}".format(http.quote_plus(inp))
try:
return web.isgd(link)

View File

@ -111,6 +111,6 @@ def log(paraml, input=None, bot=None):
fd = get_log_fd(bot.data_dir, input.server, input.chan)
fd.write(timestamp + ' ' + beau + '\n')
out = "{} {} {}".format(timestamp, input.chan, beau.encode('utf8', 'ignore'))
out = "{} {} {}".format(timestamp, input.chan, beau)
bot.logger.debug(out)

View File

@ -1,7 +1,7 @@
# metacritic.com scraper
import re
from urllib2 import HTTPError
from urllib.error import HTTPError
from util import hook, http

View File

@ -56,7 +56,7 @@ def plugin_random():
while not results:
plugin_number = random.randint(1, count_total)
print "trying {}".format(plugin_number)
print("trying {}".format(plugin_number))
try:
results = http.get_json(random_url.format(plugin_number))
except (http.HTTPError, http.URLError) as e:
@ -84,7 +84,7 @@ def format_output(data):
description = text.truncate_str(data['description'], 30)
url = data['website']
authors = data['authors'][0]
authors = authors[0] + u"\u200b" + authors[1:]
authors = authors[0] + "\u200b" + authors[1:]
stage = data['stage']
current_version = data['versions'][0]
@ -97,11 +97,11 @@ def format_output(data):
link = web.try_isgd(current_version['link'])
if description:
line_a = u"\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(name, authors, description, stage, url)
line_a = "\x02{}\x02, by \x02{}\x02 - {} - ({}) \x02{}".format(name, authors, description, stage, url)
else:
line_a = u"\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(name, authors, stage, url)
line_a = "\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(name, authors, stage, url)
line_b = u"Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(version_number, bukkit_versions,
line_b = "Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(version_number, bukkit_versions,
last_update, link)
return line_a, line_b

View File

@ -13,12 +13,12 @@ except ImportError:
has_dns = False
mc_colors = [(u'\xa7f', u'\x0300'), (u'\xa70', u'\x0301'), (u'\xa71', u'\x0302'), (u'\xa72', u'\x0303'),
(u'\xa7c', u'\x0304'), (u'\xa74', u'\x0305'), (u'\xa75', u'\x0306'), (u'\xa76', u'\x0307'),
(u'\xa7e', u'\x0308'), (u'\xa7a', u'\x0309'), (u'\xa73', u'\x0310'), (u'\xa7b', u'\x0311'),
(u'\xa71', u'\x0312'), (u'\xa7d', u'\x0313'), (u'\xa78', u'\x0314'), (u'\xa77', u'\x0315'),
(u'\xa7l', u'\x02'), (u'\xa79', u'\x0310'), (u'\xa7o', u'\t'), (u'\xa7m', u'\x13'),
(u'\xa7r', u'\x0f'), (u'\xa7n', u'\x15')]
mc_colors = [('\xa7f', '\x0300'), ('\xa70', '\x0301'), ('\xa71', '\x0302'), ('\xa72', '\x0303'),
('\xa7c', '\x0304'), ('\xa74', '\x0305'), ('\xa75', '\x0306'), ('\xa76', '\x0307'),
('\xa7e', '\x0308'), ('\xa7a', '\x0309'), ('\xa73', '\x0310'), ('\xa7b', '\x0311'),
('\xa71', '\x0312'), ('\xa7d', '\x0313'), ('\xa78', '\x0314'), ('\xa77', '\x0315'),
('\xa7l', '\x02'), ('\xa79', '\x0310'), ('\xa7o', '\t'), ('\xa7m', '\x13'),
('\xa7r', '\x0f'), ('\xa7n', '\x15')]
## EXCEPTIONS
@ -98,9 +98,9 @@ def mcping_modern(host, port):
try:
version = data["version"]["name"]
try:
desc = u" ".join(data["description"]["text"].split())
desc = " ".join(data["description"]["text"].split())
except TypeError:
desc = u" ".join(data["description"].split())
desc = " ".join(data["description"].split())
max_players = data["players"]["max"]
online = data["players"]["online"]
except Exception as e:
@ -136,10 +136,10 @@ def mcping_legacy(host, port):
length = struct.unpack('!h', sock.recv(2))[0]
values = sock.recv(length * 2).decode('utf-16be')
data = values.split(u'\x00') # try to decode data using new format
data = values.split('\x00') # try to decode data using new format
if len(data) == 1:
# failed to decode data, server is using old format
data = values.split(u'\xa7')
data = values.split('\xa7')
output = {
"motd": format_colors(" ".join(data[0].split())),
"motd_raw": data[0],
@ -199,17 +199,17 @@ def parse_input(inp):
def format_colors(motd):
for original, replacement in mc_colors:
motd = motd.replace(original, replacement)
motd = motd.replace(u"\xa7k", "")
motd = motd.replace("\xa7k", "")
return motd
def format_output(data):
if data["version"]:
return u"{motd}\x0f - {version}\x0f - {players}/{players_max}" \
u" players.".format(**data).replace("\n", u"\x0f - ")
return "{motd}\x0f - {version}\x0f - {players}/{players_max}" \
" players.".format(**data).replace("\n", "\x0f - ")
else:
return u"{motd}\x0f - {players}/{players_max}" \
u" players.".format(**data).replace("\n", u"\x0f - ")
return "{motd}\x0f - {players}/{players_max}" \
" players.".format(**data).replace("\n", "\x0f - ")
@hook.command

View File

@ -21,7 +21,7 @@ def mcstatus(inp):
green = []
yellow = []
red = []
for server, status in data.items():
for server, status in list(data.items()):
if status == "green":
green.append(server)
elif status == "yellow":

View File

@ -87,15 +87,15 @@ def mcuser(inp):
profile["lt"] = ", legacy" if profile["legacy"] else ""
if profile["paid"]:
return u"The account \x02{name}\x02 ({id}{lt}) exists. It is a \x02paid\x02" \
u" account.".format(**profile)
return "The account \x02{name}\x02 ({id}{lt}) exists. It is a \x02paid\x02" \
" account.".format(**profile)
else:
return u"The account \x02{name}\x02 ({id}{lt}) exists. It \x034\x02is NOT\x02\x0f a paid" \
u" account.".format(**profile)
return "The account \x02{name}\x02 ({id}{lt}) exists. It \x034\x02is NOT\x02\x0f a paid" \
" account.".format(**profile)
elif name_status == "free":
return u"The account \x02{}\x02 does not exist.".format(user)
return "The account \x02{}\x02 does not exist.".format(user)
elif name_status == "invalid":
return u"The name \x02{}\x02 contains invalid characters.".format(user)
return "The name \x02{}\x02 contains invalid characters.".format(user)
else:
# if you see this, panic
return "Unknown Error."

View File

@ -45,7 +45,7 @@ def mcwiki(inp):
summary = " ".join(p.text_content().splitlines())
summary = re.sub("\[\d+\]", "", summary)
summary = text.truncate_str(summary, 200)
return u"{} :: {}".format(summary, url)
return "{} :: {}".format(summary, url)
# this shouldn't happen
return "Unknown Error."

View File

@ -46,18 +46,18 @@ def format_item(item, show_url=True):
tags.append("\x02Featured\x02")
if item["IsShellShockerItem"]:
tags.append(u"\x02SHELL SHOCKER\u00AE\x02")
tags.append("\x02SHELL SHOCKER\u00AE\x02")
# join all the tags together in a comma separated string ("tag1, tag2, tag3")
tag_text = u", ".join(tags)
tag_text = ", ".join(tags)
if show_url:
# create the item URL and shorten it
url = web.try_isgd(ITEM_URL.format(item["NeweggItemNumber"]))
return u"\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating,
return "\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating,
tag_text, url)
else:
return u"\x02{}\x02 ({}) - {} - {}".format(title, price, rating,
return "\x02{}\x02 ({}) - {} - {}".format(title, price, rating,
tag_text)

View File

@ -15,7 +15,7 @@ def test(s):
def newgrounds_url(match):
location = match.group(4).split("/")[-1]
if not test(location):
print "Not a valid Newgrounds portal ID. Example: http://www.newgrounds.com/portal/view/593993"
print("Not a valid Newgrounds portal ID. Example: http://www.newgrounds.com/portal/view/593993")
return None
soup = http.get_soup("http://www.newgrounds.com/portal/view/" + location)
@ -31,7 +31,7 @@ def newgrounds_url(match):
# get rating
try:
rating_info = soup.find('dd', {'class': 'star-variable'})['title'].split("Stars &ndash;")[0].strip()
rating = u" - rated \x02{}\x02/\x025.0\x02".format(rating_info)
rating = " - rated \x02{}\x02/\x025.0\x02".format(rating_info)
except:
rating = ""

View File

@ -29,7 +29,7 @@ def password(inp, notice=None):
# add numbers
if "numeric" in inp or "number" in inp:
okay = okay + [str(x) for x in xrange(0, 10)]
okay = okay + [str(x) for x in range(0, 10)]
# add symbols
if "symbol" in inp:

View File

@ -37,4 +37,4 @@ def pre(inp):
return '{} - {}{} - {} ({} ago)'.format(section, name, size, date_string, since)
print pre("top gear")
print(pre("top gear"))

View File

@ -1,4 +1,4 @@
import urllib
import urllib.request, urllib.parse, urllib.error
import json
import re
@ -11,7 +11,7 @@ def getdata(inp, types, api_key, api_secret):
consumer = oauth.Consumer(api_key, api_secret)
client = oauth.Client(consumer)
response = client.request('http://api.rdio.com/1/', 'POST',
urllib.urlencode({'method': 'search', 'query': inp, 'types': types, 'count': '1'}))
urllib.parse.urlencode({'method': 'search', 'query': inp, 'types': types, 'count': '1'}))
data = json.loads(response[1])
return data
@ -34,16 +34,16 @@ def rdio(inp, bot=None):
artist = info['artist']
album = info['album']
url = info['shortUrl']
return u"\x02{}\x02 by \x02{}\x02 - {} {}".format(name, artist, album, url)
return "\x02{}\x02 by \x02{}\x02 - {} {}".format(name, artist, album, url)
elif 'artist' in info and not 'album' in info: # Album
name = info['name']
artist = info['artist']
url = info['shortUrl']
return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
return "\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
else: # Artist
name = info['name']
url = info['shortUrl']
return u"\x02{}\x02 - {}".format(name, url)
return "\x02{}\x02 - {}".format(name, url)
@hook.command
@ -62,7 +62,7 @@ def rdiot(inp, bot=None):
artist = info['artist']
album = info['album']
url = info['shortUrl']
return u"\x02{}\x02 by \x02{}\x02 - {} - {}".format(name, artist, album, url)
return "\x02{}\x02 by \x02{}\x02 - {} - {}".format(name, artist, album, url)
@hook.command
@ -79,7 +79,7 @@ def rdioar(inp, bot=None):
return "No results."
name = info['name']
url = info['shortUrl']
return u"\x02{}\x02 - {}".format(name, url)
return "\x02{}\x02 - {}".format(name, url)
@hook.command
@ -97,7 +97,7 @@ def rdioal(inp, bot=None):
name = info['name']
artist = info['artist']
url = info['shortUrl']
return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
return "\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
rdio_re = (r'(.*:)//(rd.io|www.rdio.com|rdio.com)(:[0-9]+)?(.*)', re.I)
@ -113,7 +113,7 @@ def rdio_url(match, bot=None):
consumer = oauth.Consumer(api_key, api_secret)
client = oauth.Client(consumer)
response = client.request('http://api.rdio.com/1/', 'POST',
urllib.urlencode({'method': 'getObjectFromUrl', 'url': url}))
urllib.parse.urlencode({'method': 'getObjectFromUrl', 'url': url}))
data = json.loads(response[1])
info = data['result']
if 'name' in info:
@ -121,11 +121,11 @@ def rdio_url(match, bot=None):
name = info['name']
artist = info['artist']
album = info['album']
return u"Rdio track: \x02{}\x02 by \x02{}\x02 - {}".format(name, artist, album)
return "Rdio track: \x02{}\x02 by \x02{}\x02 - {}".format(name, artist, album)
elif 'artist' in info and not 'album' in info: # Album
name = info['name']
artist = info['artist']
return u"Rdio album: \x02{}\x02 by \x02{}\x02".format(name, artist)
return "Rdio album: \x02{}\x02 by \x02{}\x02".format(name, artist)
else: # Artist
name = info['name']
return u"Rdio artist: \x02{}\x02".format(name)
return "Rdio artist: \x02{}\x02".format(name)

View File

@ -22,7 +22,7 @@ def reddit_url(match):
timeago = thread.xpath("//div[@id='siteTable']//p[@class='tagline']/time/text()")[0]
comments = thread.xpath("//div[@id='siteTable']//a[@class='comments']/text()")[0]
return u'\x02{}\x02 - posted by \x02{}\x02 {} ago - {} upvotes, {} downvotes - {}'.format(
return '\x02{}\x02 - posted by \x02{}\x02 {} ago - {} upvotes, {} downvotes - {}'.format(
title, author, timeago, upvotes, downvotes, comments)
@ -74,6 +74,6 @@ def reddit(inp):
else:
item["warning"] = ""
return u"\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
return "\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
" {timesince} ago - {ups} upvotes, {downs} downvotes -" \
" {link}{warning}".format(**item)

View File

@ -35,5 +35,5 @@ def rottentomatoes(inp, bot=None):
fresh = critics_score * review_count / 100
rotten = review_count - fresh
return u"{} - Critics Rating: \x02{}%\x02 ({} liked, {} disliked) " \
return "{} - Critics Rating: \x02{}%\x02 ({} liked, {} disliked) " \
"Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh, rotten, audience_score, url)

View File

@ -31,7 +31,7 @@ def rss(inp, message=None):
link = web.isgd(row["link"])
except (web.ShortenError, http.HTTPError, http.URLError):
link = row["link"]
message(u"{} - {}".format(title, link))
message("{} - {}".format(title, link))
@hook.command(autohelp=False)

View File

@ -1,4 +1,4 @@
from urllib import urlencode
from urllib.parse import urlencode
import re
from util import hook, http, web, text
@ -13,17 +13,17 @@ def soundcloud(url, api_key):
data = http.get_json(api_url + '/resolve.json?' + urlencode({'url': url, 'client_id': api_key}))
if data['description']:
desc = u": {} ".format(text.truncate_str(data['description'], 50))
desc = ": {} ".format(text.truncate_str(data['description'], 50))
else:
desc = ""
if data['genre']:
genre = u"- Genre: \x02{}\x02 ".format(data['genre'])
genre = "- Genre: \x02{}\x02 ".format(data['genre'])
else:
genre = ""
url = web.try_isgd(data['permalink_url'])
return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
return "SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format(
data['title'], data['user']['username'], desc, genre, data['playback_count'], data['download_count'],
data['comment_count'], url)
@ -32,7 +32,7 @@ def soundcloud(url, api_key):
def soundcloud_url(match, bot=None):
api_key = bot.config.get("api_keys", {}).get("soundcloud")
if not api_key:
print "Error: no api key set"
print("Error: no api key set")
return None
url = match.group(1).split(' ')[-1] + "//" + (match.group(2) if match.group(2) else "") + match.group(3) + \
match.group(4).split(' ')[0]
@ -43,7 +43,7 @@ def soundcloud_url(match, bot=None):
def sndsc_url(match, bot=None):
api_key = bot.config.get("api_keys", {}).get("soundcloud")
if not api_key:
print "Error: no api key set"
print("Error: no api key set")
return None
url = match.group(1).split(' ')[-1] + "//" + (match.group(2) if match.group(2) else "") + match.group(3) + \
match.group(4).split(' ')[0]

View File

@ -1,5 +1,5 @@
import re
from urllib import urlencode
from urllib.parse import urlencode
from util import hook, http, web
@ -45,7 +45,7 @@ def spotify(inp):
except IndexError:
return "Could not find track."
url = sptfy(gateway.format(type, id))
return u"\x02{}\x02 by \x02{}\x02 - \x02{}\x02".format(data["tracks"][0]["name"],
return "\x02{}\x02 by \x02{}\x02 - \x02{}\x02".format(data["tracks"][0]["name"],
data["tracks"][0]["artists"][0]["name"], url)
@ -62,7 +62,7 @@ def spalbum(inp):
except IndexError:
return "Could not find album."
url = sptfy(gateway.format(type, id))
return u"\x02{}\x02 by \x02{}\x02 - \x02{}\x02".format(data["albums"][0]["name"],
return "\x02{}\x02 by \x02{}\x02 - \x02{}\x02".format(data["albums"][0]["name"],
data["albums"][0]["artists"][0]["name"], url)
@ -79,7 +79,7 @@ def spartist(inp):
except IndexError:
return "Could not find artist."
url = sptfy(gateway.format(type, id))
return u"\x02{}\x02 - \x02{}\x02".format(data["artists"][0]["name"], url)
return "\x02{}\x02 - \x02{}\x02".format(data["artists"][0]["name"], url)
@hook.regex(*http_re)
@ -94,13 +94,13 @@ def spotify_url(match):
name = data["track"]["name"]
artist = data["track"]["artists"][0]["name"]
album = data["track"]["album"]["name"]
return u"Spotify Track: \x02{}\x02 by \x02{}\x02 from the album \x02{}\x02 - \x02{}\x02".format(name, artist,
return "Spotify Track: \x02{}\x02 by \x02{}\x02 from the album \x02{}\x02 - \x02{}\x02".format(name, artist,
album, sptfy(
gateway.format(type, spotify_id)))
elif type == "artist":
return u"Spotify Artist: \x02{}\x02 - \x02{}\x02".format(data["artist"]["name"],
return "Spotify Artist: \x02{}\x02 - \x02{}\x02".format(data["artist"]["name"],
sptfy(gateway.format(type, spotify_id)))
elif type == "album":
return u"Spotify Album: \x02{}\x02 - \x02{}\x02 - \x02{}\x02".format(data["album"]["artist"],
return "Spotify Album: \x02{}\x02 - \x02{}\x02 - \x02{}\x02".format(data["album"]["artist"],
data["album"]["name"],
sptfy(gateway.format(type, spotify_id)))

View File

@ -57,8 +57,8 @@ def get_steam_info(url):
data["price"] = soup.find('div', {'class': 'game_purchase_price price'}).text.strip()
return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
u" \x02Price\x02: {price}".format(**data)
return "\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
" \x02Price\x02: {price}".format(**data)
@hook.regex(*steam_re)

View File

@ -1,5 +1,5 @@
import csv
import StringIO
import io
from util import hook, http, text
@ -29,7 +29,7 @@ def is_number(s):
def unicode_dictreader(utf8_data, **kwargs):
csv_reader = csv.DictReader(utf8_data, **kwargs)
for row in csv_reader:
yield dict([(key.lower(), unicode(value, 'utf-8')) for key, value in row.iteritems()])
yield dict([(key.lower(), str(value, 'utf-8')) for key, value in row.items()])
@hook.command('sc')
@ -66,7 +66,7 @@ def steamcalc(inp, reply=None):
except (http.HTTPError, http.URLError):
return "Could not get data for this user."
csv_data = StringIO.StringIO(request) # we use StringIO because CSV can't read a string
csv_data = io.StringIO(request) # we use StringIO because CSV can't read a string
reader = unicode_dictreader(csv_data)
# put the games in a list

View File

@ -22,9 +22,9 @@ def stock(inp):
quote['color'] = "3"
quote['PercentChange'] = 100 * change / (price - change)
print quote
print(quote)
return u"\x02{Name}\x02 (\x02{symbol}\x02) - {LastTradePriceOnly} " \
return "\x02{Name}\x02 (\x02{symbol}\x02) - {LastTradePriceOnly} " \
"\x03{color}{Change} ({PercentChange:.2f}%)\x03 " \
"Day Range: {DaysRange} " \
"MCAP: {MarketCapitalization}".format(**quote)

View File

@ -18,7 +18,7 @@ def suggest(inp):
if not suggestions:
return 'no suggestions found'
out = u", ".join(suggestions)
out = ", ".join(suggestions)
# defuckify text
soup = BeautifulSoup(out)

View File

@ -20,4 +20,4 @@ def title(inp):
if not page_title:
return "Could not find title."
return u"{} [{}]".format(page_title, real_url)
return "{} [{}]".format(page_title, real_url)

View File

@ -44,7 +44,7 @@ def get_episode_info(episode, api_key):
first_aired = episode.findtext("FirstAired")
try:
air_date = datetime.date(*map(int, first_aired.split('-')))
air_date = datetime.date(*list(map(int, first_aired.split('-'))))
except (ValueError, TypeError):
return None

View File

@ -1,5 +1,5 @@
import re
from HTMLParser import HTMLParser
from html.parser import HTMLParser
from util import hook, http
@ -36,7 +36,7 @@ def multitwitch_url(match):
out = ""
for i in usernames:
if not test(i):
print "Not a valid username"
print("Not a valid username")
return None
if out == "":
out = twitch_lookup(i)
@ -50,7 +50,7 @@ def twitch_url(match):
bit = match.group(4).split("#")[0]
location = "/".join(bit.split("/")[1:])
if not test(location):
print "Not a valid username"
print("Not a valid username")
return None
return twitch_lookup(location)
@ -100,9 +100,9 @@ def twitch_lookup(location):
title = data['title']
playing = data['meta_game']
viewers = "\x033\x02Online now!\x02\x0f " + str(data["channel_count"]) + " viewer"
print viewers
print(viewers)
viewers = viewers + "s" if not " 1 view" in viewers else viewers
print viewers
print(viewers)
return h.unescape(fmt.format(title, channel, playing, viewers))
else:
try:

View File

@ -42,13 +42,13 @@ def twitter_url(match, bot=None):
text = " ".join(tweet.text.split())
if user.verified:
prefix = u"\u2713"
prefix = "\u2713"
else:
prefix = ""
time = timesince.timesince(tweet.created_at, datetime.utcnow())
return u"{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
return "{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
@hook.command("tw")
@ -71,7 +71,7 @@ def twitter(inp, bot=None):
if e[0][0]['code'] == 34:
return "Could not find tweet."
else:
return u"Error {}: {}".format(e[0][0]['code'], e[0][0]['message'])
return "Error {}: {}".format(e[0][0]['code'], e[0][0]['message'])
user = tweet.user
@ -95,21 +95,21 @@ def twitter(inp, bot=None):
if e[0][0]['code'] == 34:
return "Could not find user."
else:
return u"Error {}: {}".format(e[0][0]['code'], e[0][0]['message'])
return "Error {}: {}".format(e[0][0]['code'], e[0][0]['message'])
# get the user's tweets
user_timeline = api.user_timeline(id=user.id, count=tweet_number + 1)
# if the timeline is empty, return an error
if not user_timeline:
return u"The user \x02{}\x02 has no tweets.".format(user.screen_name)
return "The user \x02{}\x02 has no tweets.".format(user.screen_name)
# grab the newest tweet from the user's timeline
try:
tweet = user_timeline[tweet_number]
except IndexError:
tweet_count = len(user_timeline)
return u"The user \x02{}\x02 only has \x02{}\x02 tweets.".format(user.screen_name, tweet_count)
return "The user \x02{}\x02 only has \x02{}\x02 tweets.".format(user.screen_name, tweet_count)
elif re.match(r'^#\w+$', inp):
# user is searching by hashtag
@ -127,13 +127,13 @@ def twitter(inp, bot=None):
text = " ".join(tweet.text.split())
if user.verified:
prefix = u"\u2713"
prefix = "\u2713"
else:
prefix = ""
time = timesince.timesince(tweet.created_at, datetime.utcnow())
return u"{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
return "{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
@hook.command("twinfo")
@ -155,20 +155,20 @@ def twuser(inp, bot=None):
return "Unknown error"
if user.verified:
prefix = u"\u2713"
prefix = "\u2713"
else:
prefix = ""
if user.location:
loc_str = u" is located in \x02{}\x02 and".format(user.location)
loc_str = " is located in \x02{}\x02 and".format(user.location)
else:
loc_str = ""
if user.description:
desc_str = u" The users description is \"{}\"".format(user.description)
desc_str = " The users description is \"{}\"".format(user.description)
else:
desc_str = ""
return u"{}@\x02{}\x02 ({}){} has \x02{:,}\x02 tweets and \x02{:,}\x02 followers.{}" \
return "{}@\x02{}\x02 ({}){} has \x02{:,}\x02 tweets and \x02{:,}\x02 followers.{}" \
"".format(prefix, user.screen_name, user.name, loc_str, user.statuses_count, user.followers_count,
desc_str)

View File

@ -41,7 +41,7 @@ def urban(inp):
url = definitions[id_num - 1]['permalink']
output = u"[%i/%i] %s :: %s" % \
output = "[%i/%i] %s :: %s" % \
(id_num, len(definitions), definition, url)
return output

View File

@ -161,9 +161,9 @@ def munge(inp):
@hook.command
def rainbow(inp):
inp = unicode(inp)
inp = str(inp)
inp = strip(inp)
col = colors.items()
col = list(colors.items())
out = ""
l = len(colors)
for i, t in enumerate(inp):
@ -176,8 +176,8 @@ def rainbow(inp):
@hook.command
def wrainbow(inp):
inp = unicode(inp)
col = colors.items()
inp = str(inp)
col = list(colors.items())
inp = strip(inp).split(' ')
out = []
l = len(colors)

View File

@ -1,5 +1,5 @@
import json
import urllib2
import urllib.request, urllib.error, urllib.parse
from util import hook, http, web
@ -8,14 +8,14 @@ def get_sound_info(game, search):
search = search.replace(" ", "+")
try:
data = http.get_json("http://p2sounds.blha303.com.au/search/%s/%s?format=json" % (game, search))
except urllib2.HTTPError as e:
except urllib.error.HTTPError as e:
return "Error: " + json.loads(e.read())["error"]
items = []
for item in data["items"]:
if "music" in game:
textsplit = item["text"].split('"')
text = ""
for i in xrange(len(textsplit)):
for i in range(len(textsplit)):
if i % 2 != 0 and i < 6:
if text:
text += " / " + textsplit[i]

View File

@ -36,9 +36,9 @@ def wolframalpha(inp, bot=None):
if subpod:
results.append(subpod)
if results:
pod_texts.append(title + u': ' + u', '.join(results))
pod_texts.append(title + ': ' + ', '.join(results))
ret = u' - '.join(pod_texts)
ret = ' - '.join(pod_texts)
if not pod_texts:
return 'No results.'
@ -46,7 +46,7 @@ def wolframalpha(inp, bot=None):
ret = re.sub(r'\\(.)', r'\1', ret)
def unicode_sub(match):
return unichr(int(match.group(1), 16))
return chr(int(match.group(1), 16))
ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)
@ -55,4 +55,4 @@ def wolframalpha(inp, bot=None):
if not ret:
return 'No results.'
return u"{} - {}".format(ret, short_url)
return "{} - {}".format(ret, short_url)

View File

@ -25,7 +25,7 @@ def xkcd_search(term):
if result:
url = result.find('div', {'class': 'tinylink'}).text
xkcd_id = url[:-1].split("/")[-1]
print xkcd_id
print(xkcd_id)
return xkcd_info(xkcd_id, url=True)
else:
return "No results found!"

View File

@ -13,4 +13,4 @@ def answer(inp):
# we split the answer and .join() it to remove newlines/extra spaces
answer_text = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url)
return '\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url)

View File

@ -25,13 +25,13 @@ def get_video_description(video_id):
data = request['data']
out = u'\x02{}\x02'.format(data['title'])
out = '\x02{}\x02'.format(data['title'])
if not data.get('duration'):
return out
length = data['duration']
out += u' - length \x02{}\x02'.format(timeformat.format_time(length, simple=True))
out += ' - length \x02{}\x02'.format(timeformat.format_time(length, simple=True))
if 'ratingCount' in data:
# format
@ -39,12 +39,12 @@ def get_video_description(video_id):
dislikes = plural(data['ratingCount'] - int(data['likeCount']), "dislike")
percent = 100 * float(data['likeCount']) / float(data['ratingCount'])
out += u' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
out += ' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
dislikes, percent)
if 'viewCount' in data:
views = data['viewCount']
out += u' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
out += ' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
try:
uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"][
@ -53,11 +53,11 @@ def get_video_description(video_id):
uploader = data["uploader"]
upload_time = time.strptime(data['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z")
out += u' - \x02{}\x02 on \x02{}\x02'.format(uploader,
out += ' - \x02{}\x02 on \x02{}\x02'.format(uploader,
time.strftime("%Y.%m.%d", upload_time))
if 'contentRating' in data:
out += u' - \x034NSFW\x02'
out += ' - \x034NSFW\x02'
return out
@ -83,7 +83,7 @@ def youtube(inp):
video_id = request['data']['items'][0]['id']
return get_video_description(video_id) + u" - " + video_url % video_id
return get_video_description(video_id) + " - " + video_url % video_id
@hook.command('ytime')
@ -115,8 +115,8 @@ def youtime(inp):
length_text = timeformat.format_time(length, simple=True)
total_text = timeformat.format_time(total, accuracy=8)
return u'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
u'a total run time of {}!'.format(data['title'], length_text, views,
return 'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
'a total run time of {}!'.format(data['title'], length_text, views,
total_text)
@ -134,4 +134,4 @@ def ytplaylist_url(match):
author = soup.find('img', {'class': 'channel-header-profile-image'})['title']
num_videos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
views = soup.find('ul', {'class': 'header-stats'}).findAll('li')[1].text.split(' ')[0]
return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, num_videos, author)
return "\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, num_videos, author)

View File

@ -2,9 +2,8 @@ sqlalchemy
oauth2
pygeoip
tweepy
BeautifulSoup==3.2.1
lxml==3.1beta1
pyenchant==1.6.5
lxml
pyenchant
pydns>=2.3.6
watchdog
yql

View File

@ -8,7 +8,7 @@ def _hook_add(func, add, name=''):
func._hook.append(add)
if not hasattr(func, '_filename'):
func._filename = func.func_code.co_filename
func._filename = func.__code__.co_filename
if not hasattr(func, '_args'):
argspec = inspect.getargspec(func)
@ -39,7 +39,7 @@ def _hook_add(func, add, name=''):
def sieve(func):
if func.func_code.co_argcount != 5:
if func.__code__.co_argcount != 5:
raise ValueError(
'sieves must take 5 arguments: (bot, input, func, type, args)')
_hook_add(func, ['sieve', (func,)])
@ -50,7 +50,7 @@ def command(arg=None, **kwargs):
args = {}
def command_wrapper(func):
args.setdefault('name', func.func_name)
args.setdefault('name', func.__name__)
_hook_add(func, ['command', (func, args)], 'command')
return func
@ -67,7 +67,7 @@ def event(arg=None, **kwargs):
args = kwargs
def event_wrapper(func):
args['name'] = func.func_name
args['name'] = func.__name__
args.setdefault('events', ['*'])
_hook_add(func, ['event', (func, args)], 'event')
return func
@ -89,7 +89,7 @@ def regex(regex, flags=0, **kwargs):
args = kwargs
def regex_wrapper(func):
args['name'] = func.func_name
args['name'] = func.__name__
args['regex'] = regex
args['re'] = re.compile(regex, flags)
_hook_add(func, ['regex', (func, args)], 'regex')

View File

@ -1,18 +1,15 @@
# convenience wrapper for urllib2 & friends
import cookielib
import http.cookiejar
import json
import urllib
import urllib2
import urlparse
from urllib import quote, quote_plus as _quote_plus
import urllib.request, urllib.parse, urllib.error
import urllib.request, urllib.error, urllib.parse
import urllib.parse
from urllib.parse import quote, quote_plus as _quote_plus
from lxml import etree, html
from bs4 import BeautifulSoup
# used in plugins that import this
from urllib2 import URLError, HTTPError
from urllib.error import URLError, HTTPError
ua_cloudbot = 'Cloudbot/DEV http://github.com/CloudDev/CloudBot'
@ -24,7 +21,7 @@ ua_internetexplorer = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
ua_chrome = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.4 (KHTML, ' \
'like Gecko) Chrome/22.0.1229.79 Safari/537.4'
jar = cookielib.CookieJar()
jar = http.cookiejar.CookieJar()
def get(*args, **kwargs):
@ -63,13 +60,13 @@ def open(url, query_params=None, user_agent=None, post_data=None,
url = prepare_url(url, query_params)
request = urllib2.Request(url, post_data)
request = urllib.request.Request(url, post_data)
if get_method is not None:
request.get_method = lambda: get_method
if headers is not None:
for header_key, header_value in headers.iteritems():
for header_key, header_value in headers.items():
request.add_header(header_key, header_value)
request.add_header('User-Agent', user_agent)
@ -78,9 +75,9 @@ def open(url, query_params=None, user_agent=None, post_data=None,
request.add_header('Referer', referer)
if cookies:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
else:
opener = urllib2.build_opener()
opener = urllib.request.build_opener()
if timeout:
return opener.open(request, timeout=timeout)
@ -90,20 +87,20 @@ def open(url, query_params=None, user_agent=None, post_data=None,
def prepare_url(url, queries):
if queries:
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
query = dict(urlparse.parse_qsl(query))
query = dict(urllib.parse.parse_qsl(query))
query.update(queries)
query = urllib.urlencode(dict((to_utf8(key), to_utf8(value))
for key, value in query.iteritems()))
query = urllib.parse.urlencode(dict((to_utf8(key), to_utf8(value))
for key, value in query.items()))
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
url = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
return url
def to_utf8(s):
if isinstance(s, unicode):
if isinstance(s, str):
return s.encode('utf8', 'ignore')
else:
return str(s)

View File

@ -1,5 +1,5 @@
import http
import web
from . import http
from . import web
def eval_py(code, paste_multiline=True):

View File

@ -6,8 +6,8 @@
import re
from HTMLParser import HTMLParser
import htmlentitydefs
from html.parser import HTMLParser
import html.entities
class HTMLTextExtractor(HTMLParser):
@ -19,15 +19,15 @@ class HTMLTextExtractor(HTMLParser):
self.result.append(d)
def handle_charref(self, number):
codepoint = int(number[1:], 16) if number[0] in (u'x', u'X') else int(number)
self.result.append(unichr(codepoint))
codepoint = int(number[1:], 16) if number[0] in ('x', 'X') else int(number)
self.result.append(chr(codepoint))
def handle_entityref(self, name):
codepoint = htmlentitydefs.name2codepoint[name]
self.result.append(unichr(codepoint))
codepoint = html.entities.name2codepoint[name]
self.result.append(chr(codepoint))
def get_text(self):
return u''.join(self.result)
return ''.join(self.result)
def strip_html(html):
@ -39,7 +39,7 @@ def strip_html(html):
def munge(text, munge_count=0):
"""munges up text."""
reps = 0
for n in xrange(len(text)):
for n in range(len(text)):
rep = character_replacements.get(text[n])
if rep:
text = text[:n] + rep.decode('utf8') + text[n + 1:]

View File

@ -24,7 +24,7 @@ class TextGenerator(object):
# replace static variables in the template with provided values
if self.variables:
for key, value in self.variables.items():
for key, value in list(self.variables.items()):
text = text.replace("{%s}" % key, value)
# get a list of all text parts we need
@ -33,7 +33,7 @@ class TextGenerator(object):
for required_part in required_parts:
ppart = self.parts[required_part]
# check if the part is a single string or a list
if not isinstance(ppart, basestring):
if not isinstance(ppart, str):
part = random.choice(self.parts[required_part])
else:
part = self.parts[required_part]
@ -43,7 +43,7 @@ class TextGenerator(object):
def generate_strings(self, amount, template=None):
strings = []
for i in xrange(amount):
for i in range(amount):
strings.append(self.generate_string())
return strings

View File

@ -72,7 +72,7 @@ def timesince(d, now=None):
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u'0 ' + 'minutes'
return '0 ' + 'minutes'
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:

View File

@ -25,8 +25,8 @@ __license__ = "Python"
import re
import unicodedata
import urlparse
from urllib import quote, unquote
import urllib.parse
from urllib.parse import quote, unquote
default_port = {
'http': 80,
@ -52,7 +52,7 @@ normalizers = (Normalizer(re.compile(
def normalize(url, assume_scheme=False):
"""Normalize a URL."""
scheme, auth, path, query, fragment = urlparse.urlsplit(url.strip())
scheme, auth, path, query, fragment = urllib.parse.urlsplit(url.strip())
userinfo, host, port = re.search('([^@]*@)?([^:]*):?(.*)', auth).groups()
# Always provide the URI scheme in lowercase characters.
@ -78,7 +78,7 @@ def normalize(url, assume_scheme=False):
# Always use uppercase A-through-F characters when percent-encoding.
# All portions of the URI must be utf-8 encoded NFC from Unicode strings
def clean(string):
string = unicode(unquote(string), 'utf-8', 'replace')
string = str(unquote(string), 'utf-8', 'replace')
return unicodedata.normalize('NFC', string).encode('utf-8')
path = quote(clean(path), "~:/?#[]@!$&'()*+,;=")
@ -118,7 +118,7 @@ def normalize(url, assume_scheme=False):
# For schemes that define a port, use an empty port if the default is
# desired
if port and scheme in default_port.keys():
if port and scheme in list(default_port.keys()):
if port.isdigit():
port = str(int(port))
if int(port) == default_port[scheme]:
@ -130,7 +130,7 @@ def normalize(url, assume_scheme=False):
auth += ":" + port
if url.endswith("#") and query == "" and fragment == "":
path += "#"
normal_url = urlparse.urlunsplit((scheme, auth, path, query,
normal_url = urllib.parse.urlunsplit((scheme, auth, path, query,
fragment)).replace("http:///", "http://")
for norm in normalizers:
m = norm.regex.match(normal_url)

View File

@ -1,16 +1,13 @@
""" web.py - handy functions for web services """
import http
import urlnorm
from . import http
from . import urlnorm
import json
import urllib
import yql
import urllib.request, urllib.parse, urllib.error
short_url = "http://is.gd/create.php"
paste_url = "http://hastebin.com"
yql_env = "http://datatables.org/alltables.env"
YQL = yql.Public()
class ShortenError(Exception):
@ -25,7 +22,7 @@ class ShortenError(Exception):
def isgd(url):
""" shortens a URL with the is.gd API """
url = urlnorm.normalize(url.encode('utf-8'), assume_scheme='http')
params = urllib.urlencode({'format': 'json', 'url': url})
params = urllib.parse.urlencode({'format': 'json', 'url': url})
request = http.get_json("http://is.gd/create.php?%s" % params)
if "errorcode" in request:
@ -46,9 +43,4 @@ def haste(text, ext='txt'):
""" pastes text to a hastebin server """
page = http.get(paste_url + "/documents", post_data=text)
data = json.loads(page)
return ("%s/%s.%s" % (paste_url, data['key'], ext))
def query(query, params={}):
""" runs a YQL query and returns the results """
return YQL.execute(query, params, env=yql_env)
return "{}/{}.{}".format(paste_url, data['key'], ext)