This commit is contained in:
Luke Rogers 2011-11-20 22:23:31 +13:00
commit 37588421f3
100 changed files with 22673 additions and 0 deletions

43
plugins/8ball.py Normal file

@ -0,0 +1,43 @@
from util import hook
import random
import re
r = "\x02\x0305" # red
g = "\x02\x0303" # green
y = "\x02\x0308" # yellow
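# note: \x02 toggles bold and \x03NN selects a mIRC colour (03 = green, 05 = maroon,
# used here as "red", 08 = yellow), so each answer is colour-coded by how favourable it is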
answers = [g + "As I see it, yes",
g + "It is certain",
g + "It is decidedly so",
g + "Most likely",
g + "Outlook good",
g + "Signs point to yes",
g + "One would be wise to think so",#
g + "Naturally",#
g + "Without a doubt",
g + "Yes",
g + "Yes, definitely",
g + "You may rely on it",
y + "Reply hazy, try again",
y + "Ask again later",
y + "Better not tell you now",
y + "Cannot predict now",
y + "Concentrate and ask again",
y + "You know the answer better than I",#
y + "Maybe...",#
r + "You're kidding, right?",#
r + "Don't count on it",
r + "In your dreams", #
r + "My reply is no",
r + "My sources say no",
r + "Outlook not so good",
r + "Very doubtful"]
@hook.command('8ball')
def ask(inp, me=None):
    ".8ball <question> - The all knowing magic eight ball, in electronic form. Ask a question and the answer shall be provided."
    inp = inp.strip()
    # add a question mark if the question ends without punctuation
    if inp and re.match("[a-zA-Z0-9]", inp[-1]):
        inp += "?"
    me("shakes the magic 8 ball... %s" % random.choice(answers))

119
plugins/admin.py Normal file

@ -0,0 +1,119 @@
# Shitty plugin made by iloveportalz0r
# Broken by The Noodle
from util import hook
@hook.command
def join(inp, input=None, db=None, notice=None):
".join <channel> -- joins a channel"
if input.nick not in input.bot.config["admins"]:
notice("Only bot admins can use this command!")
return
chan = inp.split(' ', 1)
#if len(chan) != 1:
#return "Usage: omg please join <channel>"
notice("Joining " + inp)
input.conn.send("JOIN " + inp)
@hook.command
def cycle(inp, input=None, db=None, notice=None):
".cycle <channel> -- cycles a channel"
if input.nick not in input.bot.config["admins"]:
notice("Only bot admins can use this command!")
return
notice("Cycling " + inp + ".")
input.conn.send("PART " + inp)
input.conn.send("JOIN " + inp)
@hook.command
def part(inp, input=None, notice=None):
".part <channel> -- leaves a channel"
if input.nick not in input.bot.config["admins"]:
notice("Only bot admins can use this command!")
return
chan = inp.split(' ', 1)
#if len(chan) != 1:
#return "Usage: omg please part <channel>"
notice("Parting from " + inp + ".")
input.conn.send("PART " + inp)
@hook.command
def chnick(inp, input=None, notice=None):
".chnick <nick> - Change the nick!"
if input.nick not in input.bot.config["admins"]:
notice("Only bot admins can use this command!")
return
chan = inp.split(' ', 1)
#if len(chan) != 1:
#return "Usage: omg please part <channel>"
notice("Changing nick to " + inp + ".")
input.conn.send("NICK " + inp)
@hook.command
def raw(inp, input=None, notice=None):
".raw <command> - Send a RAW IRC command!"
if input.nick not in input.bot.config["admins"]:
notice("Only bot admins can use this command!")
return
chan = inp.split(' ', 1)
notice("Raw command sent.")
input.conn.send(inp)
@hook.command
def kick(inp, input=None, notice=None):
".kick [channel] <user> [reason] -- kick a user!"
if input.nick not in input.bot.config["admins"]:
notice("Only bot admins can use this command!")
return
stuff = inp.split(" ")
if stuff[0][0] == "#":
out = "KICK " + stuff[0] + " " + stuff[1]
if len(stuff) > 2:
reason = ""
for x in stuff[2:]:
reason = reason + x + " "
reason = reason[:-1]
out = out+" :"+reason
else:
out = "KICK " + input.chan + " " + stuff[0]
if len(stuff) > 1:
reason = ""
for x in stuff[1:]:
reason = reason + x + " "
reason = reason[:-1]
out = out + " :" + reason
input.conn.send(out)
@hook.command
def say(inp, input=None, notice=None):
".say [channel] <message> -- makes the bot say <message> in [channel]. if [channel] is blank the bot will say the <message> in the channel the command was used in."
if input.nick not in input.bot.config["admins"]:
notice("Only bot admins can use this command!")
return
stuff = inp.split(" ")
if stuff[0][0] == "#":
message = ""
for x in stuff[1:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG " + stuff[0] + " :" + message
else:
message = ""
for x in stuff[0:]:
message = message + x + " "
message = message[:-1]
out = "PRIVMSG " + input.chan + " :" + message
input.conn.send(out)
@hook.command
def topic(inp, input=None, notice=None):
".topic [channel] <topic> -- change the topic of a channel"
if input.nick not in input.bot.config["admins"]:
notice("Only bot admins can use this command!")
return
stuff = inp.split(" ")
if stuff[0][0] == "#":
out = "TOPIC " + stuff[0] + " :" + stuff[1]
else:
out = "TOPIC " + input.chan + " :" + stuff[0]
input.conn.send(out)

33
plugins/antiflood.py Normal file

@ -0,0 +1,33 @@
def yaml_load(filename):
import yaml
fileHandle = open(filename, 'r')
stuff = yaml.load(fileHandle.read())
fileHandle.close()
return stuff
def yaml_save(stuff, filename):
import yaml
fileHandle = open (filename, 'w' )
fileHandle.write (yaml.dump(stuff))
fileHandle.close()
from util import hook
@hook.event('*')
def tellinput(paraml, input=None, say=None):
# import time
# now = time.time()
# spam = yaml_load('spam')
# if spam[input.nick]:
# spam[input.nick].append(time.time())
# else:
# spam[input.nick] = [time.time()]
# for x in spam[input.nick]:
# if now - x > 5:
# spam[input.nick].pop(x)
# if len(spam[input.nick]) > 8:
# say(":O")
# say("HOW COULD YOU "+input.nick)
# say("lol!")
# yaml_save(spam,'spam')
return

88
plugins/bf.py Normal file

@ -0,0 +1,88 @@
'''brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py'''
import re
import random
from util import hook
BUFFER_SIZE = 5000
MAX_STEPS = 1000000
@hook.command
def bf(inp):
".bf <prog> -- executes brainfuck program <prog>"""
program = re.sub('[^][<>+-.,]', '', inp)
# create a dict of brackets pairs, for speed later on
brackets = {}
open_brackets = []
for pos in range(len(program)):
if program[pos] == '[':
open_brackets.append(pos)
elif program[pos] == ']':
if len(open_brackets) > 0:
brackets[pos] = open_brackets[-1]
brackets[open_brackets[-1]] = pos
open_brackets.pop()
else:
return 'unbalanced brackets'
if len(open_brackets) != 0:
return 'unbalanced brackets'
# now we can start interpreting
ip = 0 # instruction pointer
mp = 0 # memory pointer
steps = 0
memory = [0] * BUFFER_SIZE # initial memory area
rightmost = 0
output = "" # we'll save the output here
# the main program loop:
while ip < len(program):
c = program[ip]
        if c == '+':
            memory[mp] = (memory[mp] + 1) % 256
        elif c == '-':
            memory[mp] = (memory[mp] - 1) % 256
elif c == '>':
mp += 1
if mp > rightmost:
rightmost = mp
if mp >= len(memory):
# no restriction on memory growth!
memory.extend([0] * BUFFER_SIZE)
        elif c == '<':
            mp = (mp - 1) % len(memory)
elif c == '.':
output += chr(memory[mp])
if len(output) > 500:
break
elif c == ',':
memory[mp] = random.randint(1, 255)
elif c == '[':
if memory[mp] == 0:
ip = brackets[ip]
elif c == ']':
if memory[mp] != 0:
ip = brackets[ip]
ip += 1
steps += 1
if steps > MAX_STEPS:
if output == '':
output = '(no output)'
output += '[exceeded %d iterations]' % MAX_STEPS
break
stripped_output = re.sub(r'[\x00-\x1F]', '', output)
if stripped_output == '':
if output != '':
return 'no printable output'
return 'no output'
return stripped_output[:430].decode('utf8', 'ignore')
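A tiny program with a known result makes a handy sanity check for the interpreter above; this is only an illustrative sketch, assuming bf() can be called directly from a test shell:
# 8 * 8 = 64 in the second cell, one more '+' gives 65, and '.' emits it as a character
print bf('++++++++[>++++++++<-]>+.')   # -> A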

10
plugins/bitcoin.py Normal file

@ -0,0 +1,10 @@
from util import http, hook
@hook.command(autohelp=False)
def bitcoin(inp, say=None):
".bitcoin -- gets current exchange rate for bitcoins from mtgox"
data = http.get_json("https://mtgox.com/code/data/ticker.php")
ticker = data['ticker']
say("Current: \x0307$%(buy).2f\x0f - High: \x0307$%(high).2f\x0f"
" - Low: \x0307$%(low).2f\x0f - Volume: %(vol)s" % ticker)

17
plugins/choose.py Normal file

@ -0,0 +1,17 @@
import re
import random
from util import hook
@hook.command
def choose(inp):
".choose <choice1>, <choice2>, ... <choicen> -- makes a decision"
c = re.findall(r'([^,]+)', inp)
if len(c) == 1:
c = re.findall(r'(\S+)', inp)
if len(c) == 1:
return 'the decision is up to you'
return random.choice(c).strip()

38
plugins/coin.py Normal file

@ -0,0 +1,38 @@
# # Lukeroge
from util import hook
import random
@hook.command(autohelp=False)
def coin(inp):
".coin - Flips a coin and shares the result."
flip = random.randint(0,1)
if flip == 1:
sidename = "heads"
else:
sidename = "tails"
message = "You flip a coin and it lands on " + sidename + "!"
return message
@hook.command(autohelp=False)
def coins(inp):
".coins - Flips two coins and shares the results."
flip2 = random.randint(0,1)
if flip2 == 1:
sidename2 = "heads"
else:
sidename2 = "tails"
flip = random.randint(0,1)
if flip == 1:
sidename = "heads"
else:
sidename = "tails"
message = "You flip two coins. You get a " + sidename + ", and a " + sidename2 + "!"
return message

89
plugins/dice.py Normal file

@ -0,0 +1,89 @@
"""
dice.py: written by Scaevolus 2008, updated 2009
simulates dicerolls
"""
import re
import random
from util import hook
whitespace_re = re.compile(r'\s+')
valid_diceroll = r'^([+-]?(?:\d+|\d*d(?:\d+|F))(?:[+-](?:\d+|\d*d(?:\d+|F)))*)( .+)?$'
valid_diceroll_re = re.compile(valid_diceroll, re.I)
sign_re = re.compile(r'[+-]?(?:\d*d)?(?:\d+|F)', re.I)
split_re = re.compile(r'([\d+-]*)d?(F|\d*)', re.I)
def nrolls(count, n):
"roll an n-sided die count times"
if n == "F":
return [random.randint(-1, 1) for x in xrange(min(count, 100))]
if n < 2: # it's a coin
if count < 100:
return [random.randint(0, 1) for x in xrange(count)]
else: # fake it
return [int(random.normalvariate(.5*count, (.75*count)**.5))]
else:
if count < 100:
return [random.randint(1, n) for x in xrange(count)]
else: # fake it
return [int(random.normalvariate(.5*(1+n)*count,
(((n+1)*(2*n+1)/6.-(.5*(1+n))**2)*count)**.5))]
@hook.command('roll')
#@hook.regex(valid_diceroll, re.I)
@hook.command
def dice(inp):
".dice <diceroll> -- simulates dicerolls, e.g. .dice 2d20-d5+4 roll 2 " \
"D20s, subtract 1D5, add 4"
try: # if inp is a re.match object...
(inp, desc) = inp.groups()
except AttributeError:
(inp, desc) = valid_diceroll_re.match(inp).groups()
if "d" not in inp:
return
spec = whitespace_re.sub('', inp)
if not valid_diceroll_re.match(spec):
return "Invalid diceroll"
groups = sign_re.findall(spec)
total = 0
rolls = []
for roll in groups:
count, side = split_re.match(roll).groups()
count = int(count) if count not in " +-" else 1
if side.upper() == "F": # fudge dice are basically 1d3-2
for fudge in nrolls(count, "F"):
if fudge == 1:
rolls.append("\x033+\x0F")
elif fudge == -1:
rolls.append("\x034-\x0F")
else:
rolls.append("0")
total += fudge
elif side == "":
total += count
else:
side = int(side)
try:
if count > 0:
dice = nrolls(count, side)
rolls += map(str, dice)
total += sum(dice)
else:
dice = nrolls(-count, side)
rolls += [str(-x) for x in dice]
total -= sum(dice)
except OverflowError:
return "Thanks for overflowing a float, jerk >:["
if desc:
return "%s: %d (%s=%s)" % (desc.strip(), total, inp, ", ".join(rolls))
else:
return "%d (%s=%s)" % (total, inp, ", ".join(rolls))

110
plugins/dictionary.py Normal file

@ -0,0 +1,110 @@
import re
from util import hook, http
@hook.command('u')
@hook.command
def urban(inp):
'''.u/.urban <phrase> -- looks up <phrase> on urbandictionary.com'''
url = 'http://www.urbandictionary.com/define.php'
page = http.get_html(url, term=inp)
words = page.xpath("//td[@class='word']")
defs = page.xpath("//div[@class='definition']")
if not defs:
return 'No definitions found :('
out = '' + words[0].text_content().strip() + ': ' + ' '.join(
defs[0].text_content().split())
if len(out) > 400:
out = out[:out.rfind(' ', 0, 400)] + '...'
return out
# define plugin by GhettoWizard & Scaevolus
@hook.command('dictionary')
@hook.command
def define(inp):
".define/.dictionary <word> -- fetches definition of <word>"
url = 'http://ninjawords.com/'
h = http.get_html(url + http.quote_plus(inp))
definition = h.xpath('//dd[@class="article"] | '
'//div[@class="definition"] |'
'//div[@class="example"]')
if not definition:
return 'No results for ' + inp + ' :('
def format_output(show_examples):
result = '%s: ' % h.xpath('//dt[@class="title-word"]/a/text()')[0]
correction = h.xpath('//span[@class="correct-word"]/text()')
if correction:
result = 'definition for "%s": ' % correction[0]
sections = []
for section in definition:
if section.attrib['class'] == 'article':
sections += [[section.text_content() + ': ']]
elif section.attrib['class'] == 'example':
if show_examples:
sections[-1][-1] += ' ' + section.text_content()
else:
sections[-1] += [section.text_content()]
for article in sections:
result += article[0]
if len(article) > 2:
result += ' '.join('%d. %s' % (n + 1, section)
for n, section in enumerate(article[1:]))
else:
result += article[1] + ' '
synonyms = h.xpath('//dd[@class="synonyms"]')
if synonyms:
result += synonyms[0].text_content()
result = re.sub(r'\s+', ' ', result)
result = re.sub('\xb0', '', result)
return result
result = format_output(True)
if len(result) > 450:
result = format_output(False)
if len(result) > 450:
result = result[:result.rfind(' ', 0, 450)]
result = re.sub(r'[^A-Za-z]+\.?$', '', result) + ' ...'
return result
@hook.command('e')
@hook.command
def etymology(inp):
".e/.etymology <word> -- Retrieves the etymology of chosen word"
url = 'http://www.etymonline.com/index.php'
h = http.get_html(url, term=inp)
etym = h.xpath('//dl')
if not etym:
return 'No etymology found for ' + inp + ' :('
etym = etym[0].text_content()
etym = ' '.join(etym.split())
if len(etym) > 400:
etym = etym[:etym.rfind(' ', 0, 400)] + ' ...'
return etym

20
plugins/down.py Normal file

@ -0,0 +1,20 @@
import urlparse
from util import hook, http
@hook.command
def down(inp):
'''.down <url> -- checks to see if the site is down'''
if 'http://' not in inp:
inp = 'http://' + inp
inp = 'http://' + urlparse.urlparse(inp).netloc
# http://mail.python.org/pipermail/python-list/2006-December/589854.html
try:
http.get(inp, get_method='HEAD')
return inp + ' seems to be up'
except http.URLError:
return inp + ' seems to be down'

104
plugins/factoids.py Normal file

@ -0,0 +1,104 @@
"""
remember.py: written by Scaevolus 2010
"""
from util import hook
import re
def db_init(db):
db.execute("create table if not exists mem(word, data, nick,"
" primary key(word))")
db.commit()
def get_memory(db, word):
row = db.execute("select data from mem where word=lower(?)", [word]).fetchone()
if row:
return row[0]
else:
return None
@hook.regex(r'^\+ ?(.*)')
@hook.command("r")
def remember(inp, nick='', db=None, say=None, input=None, notice=None):
    "+<word> [+]<data> -- maps word to data in the memory"
    if input.nick not in input.bot.config["admins"]:
        return
    # accept both the regex match object and the plain ".r <word> <data>" string
    binp = inp.group(0) if hasattr(inp, 'group') else inp
    bind = binp.replace('+', '', 1)
    db_init(db)
    append = False
    try:
        head, tail = bind.split(None, 1)
    except ValueError:
        return remember.__doc__
    data = get_memory(db, head)
    if data and tail[0] == '+':
        append = True
        new = tail[1:]  # ignore the + symbol
        import string
        # only the data (not the word) is stored, so append straight onto it
        if len(tail) > 1 and tail[1] in (string.punctuation + ' '):
            tail = data + new
        else:
            tail = data + ' ' + new
    db.execute("replace into mem(word, data, nick) values"
               " (lower(?),?,?)", (head, tail, nick))
    db.commit()
    if data:
        if append:
            notice("Appending %s to %s" % (new, data.replace('"', "''")))
        else:
            notice('Forgetting existing data (%s), remembering this instead!' %
                   data.replace('"', "''"))
    else:
        notice('Remembered!')
    return
@hook.command
def forget(inp, db=None):
    ".forget <word> -- forgets the mapping that word had"
    # tolerate a leading '-' (the old regex form) and keep only the word itself
    bind = inp.replace('-', '', 1) if inp.startswith('-') else inp
    bind = bind.strip()
    if not bind:
        return forget.__doc__
    head = bind.split(None, 1)[0]
    db_init(db)
    data = get_memory(db, head)
    if data:
        db.execute("delete from mem where word=lower(?)", (head,))
        db.commit()
        return 'forgot `%s`' % data.replace('`', "'")
    else:
        return "I don't know about that."
@hook.command("info")
@hook.regex(r'^\? ?(.+)')
def question(inp, say=None, db=None):
    "?<word> -- shows what data is associated with word"
    db_init(db)
    # accept both the regex match object and the plain ".info <word>" string
    word = inp.group(1) if hasattr(inp, 'group') else inp
    data = get_memory(db, word.strip())
    if data:
        say(data)

67
plugins/flip.py Normal file

@ -0,0 +1,67 @@
# -*- coding: utf-8 -*-
from util import hook
import random
@hook.command
def flip(inp, flip_count=0, say = None):
".flip <text> -- flips the given text"
guy = unicode(random.choice(flips), 'utf8')
inp = inp.lower()
inp = inp[::-1]
reps = 0
for n in xrange(len(inp)):
rep = character_replacements.get(inp[n])
if rep:
inp = inp[:n] + rep.decode('utf8') + inp[n + 1:]
reps += 1
if reps == flip_count:
break
say(guy + u"" + inp)
flips = ["(屮ಠ︵ಠ)屮",
"( ノ♉︵♉ )ノ",
"(╯°□°)╯",
"( ノ⊙︵⊙)ノ"]
character_replacements = {
'a': 'ɐ',
'b': 'q',
'c': 'ɔ',
'd': 'p',
'e': 'ǝ',
'f': 'ɟ',
'g': 'b',
'h': 'ɥ',
'i': 'ı',
'j': 'ظ',
'k': 'ʞ',
'l': 'ן',
'm': 'ɯ',
'n': 'u',
'o': 'o',
'p': 'd',
'q': 'b',
'r': 'ɹ',
's': 's',
't': 'ʇ',
'u': 'n',
'v': 'ʌ',
'w': 'ʍ',
'x': 'x',
'y': 'ʎ',
'z': 'z',
'?': '¿',
'.': '˙',
'/': '\\',
'\\': '/',
'(': ')',
')': '(',
'<': '>',
'>': '<',
'[': ']',
']': '[',
'{': '}',
'}': '{',
'\'': ',',
'_': ''}

59
plugins/flirt.py Normal file

@ -0,0 +1,59 @@
from util import hook
import re
import random
flirts = ["I bet your name's Mickey, 'cause you're so fine.",
"Hey, pretty mama. You smell kinda pretty, wanna smell me?",
"I better get out my library card, 'cause I'm checkin' you out.",
"If you were a booger, I'd pick you.",
"If I could rearrange the alphabet, I would put U and I together.",
"I've been bad, take me to your room.",
"I think Heaven's missing an angel.",
"That shirt looks good on you, it'd look better on my bedroom floor.",
"I cant help to notice but you look a lot like my next girlfriend",
"Aren't your feet tired? Because you've been running through my mind all day.",
"I must be asleep, 'cause you are a dream come true. Also, I'm slightly damp.",
"I like large posteriors and I cannot prevaricate.",
"How you doin'?",
"If I said you had a good body, would you hold it against me?",
"Hey, baby cakes.",
"Nice butt.",
"I love you like a fat kid loves cake.",
"Do you believe in love at first sight? Or should I walk by again...?",
"Want to see my good side? Hahaha, that was a trick question, all I have are good sides.",
"You look like a woman who appreciates the finer things in life. Come over here and feel my velour bedspread.",
"Now you're officially my woman. Kudos! I can't say I don't envy you.",
"I find that the most erotic part of a woman is the boobies.",
"If you want to climb aboard the Love Train, you've got to stand on the Love Tracks. But you might just get smushed by a very sensual cow-catcher.",
"Lets say you and I knock some very /sensual/ boots.",
"I lost my phone number, can I have yours?",
"Does this rag smell like chloroform to you? ",
"I'm here, where are your other two wishes?",
"Apart from being sexy, what do you do for a living?",
"Hi, I'm Mr. Right. Someone said you were looking for me. ",
"You got something on your chest: My eyes.",
"Are you from Tennessee? Cause you're the only TEN I see.",
"Are you an alien? Because you just abducted my heart.",
"Excuse me, but I think you dropped something!!! MY JAW!!",
"If I followed you home, would you keep me?",
"Where have you been all my life?",
"I'm just a love machine, and I don't work for nobody but you",
"Do you live on a chicken farm? Because you sure know how to raise cocks.",
"Are you wearing space pants? Because your ass is out of this world.",
"Nice legs. What time do they open?",
"Your daddy must have been a baker, because you've got a nice set of buns."]
@hook.command(autohelp=False)
def flirt(inp, nick=None, me=None, input=None):
".flirt -- make mau5bot flirt!"
msg = "flirts with " + nick + "... \"" + random.choice(flirts) + "\""
if re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()) and inp != "":
msg = "flirts with " + inp + "... \"" + random.choice(flirts) + "\""
if inp == input.conn.nick.lower() or inp == "itself":
msg = "flirts with itself"
me(msg)

24
plugins/fmylife.py Normal file

@ -0,0 +1,24 @@
import re
from util import hook, http, misc
from urlparse import urljoin
from BeautifulSoup import BeautifulSoup
base_url = 'http://www.fmylife.com/'
rand_url = urljoin(base_url, 'random')
spec_url = urljoin(base_url, '%d')
error = 'Today I couldn\'t seem to access fmylife.com.. FML'
@hook.command(autohelp=False)
@hook.command("fml")
def fmylife(inp):
    ".fml -- gets a random quote from fmylife.com"
    page = http.get(rand_url)
    soup = BeautifulSoup(page)
    soup.find('div', id='submit').extract()
    post = soup.body.find('div', 'post')
    id = int(post.find('a', 'fmllink')['href'].split('/')[-1])
    body = misc.strip_html(' '.join(link.renderContents()
                                    for link in post('a', 'fmllink')).decode('utf-8'))
    return u'(#%d) %s' % (id, body)

67
plugins/fortune.py Normal file

@ -0,0 +1,67 @@
from util import hook
import re
import random
fortunes = ["Help! I'm stuck in the fortune cookie factory!",
"He who laughs at himself never runs out of things to laugh at.",
"The world is your oyster.",
"Today will be a good day.",
"Only listen to the Hoss Fortune Cookies. Disregard all other fortune telling units.",
"Life's short, party naked.",
"Haters gonna hate.",
"You are amazing and let no one tell you otherwise.",
"A starship ride has been promised to you by the galactic wizard.",
"That wasnt chicken.",
"Dont fry bacon in the nude.",
"Take calculated risks. That is quite different from being rash.",
"DO THE IMPOSSIBLE, SEE THE INVISIBLE.",
"You cannot plough a field by turning it over in your mind. Unless you have telekinesis.",
"No one can make you feel inferior without your consent.",
"Never lose the ability to find beauty in ordinary things.",
"Ignore previous fortune.",
"Smile more.",
"You are the dancing queen.",
"YOU'RE THE BEST AROUND, NOTHIN'S GONNA EVER KEEP YA DOWN.",
"The cake is a lie.",
"Never take life seriously. Nobody gets out alive anyway.",
"Friendship is like peeing on yourself: everyone can see it, but only you get the warm feeling that it brings.",
"Never go to a doctor whose office plants have died.",
"Always remember you're unique, just like everyone else.",
"What if everything is an illusion and nothing exists? In that case, I definitely overpaid for my carpet.",
"Even if you are on the right track, you will get run over if you just sit there.",
"Think like a man of action, and act like a man of thought.",
"When in doubt, lubricate.",
"It is time for you to live up to your family name and face FULL LIFE CONSEQUENCES.",
"It's a good day to do what has to be done.",
"Move near the countryside and you will be friends of John Freeman.",
"If you can't beat 'em, mock 'em.",
"Use gun. And if that don't work, use more gun.",
"LOOK OUT BEHIND YOU",
"This message will self destruct in 10 seconds.",
"You'll never know what you can do until you try.",
"You are talented in many ways",
"Be both a speaker of words and a doer of deeds.",
"A visit to a strange place will bring you renewed perspective.",
"A passionate new romance will appear in your life when you least expect it.",
"If you care enough for a result, you will most certainly attain it.",
"To be loved, be loveable.",
"Step away from the power position for one day.",
"If you want to get a sure crop with a big yield, sow wild oats.",
"It doesn't take guts to quit.",
"You can expect a change for the better in job or status in the future.",
"As the wallet grows, so do the needs.",
"You have a reputation for being straightforward and honest.",
"Learn a new language and get a new soul.",
"A tall dark stranger will soon enter our life.",
"Keep staring. I'll do a trick."]
@hook.command(autohelp=False)
def fortune(inp, nick=None, say=None, input=None):
".fortune -- get your fortune"
msg = "(" + nick + ") " + random.choice(fortunes)
if re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()) and inp != "":
msg = "(@" + inp + ") " + random.choice(fortunes)
say(msg)

32
plugins/gcalc.py Normal file

@ -0,0 +1,32 @@
import re
from util import hook, http, misc
from BeautifulSoup import BeautifulSoup
@hook.command("calc")
@hook.command("math")
def calc(inp):
'''.calc <term> -- returns Google Calculator result'''
white_re = re.compile(r'\s+')
page = http.get('http://www.google.com/search', q=inp)
soup = BeautifulSoup(page)
response = soup.find('h2', {'class' : 'r'})
if response is None:
return "Could not calculate " + inp
output = response.renderContents()
output = ' '.join(output.splitlines())
output = output.replace("\xa0", ",")
output = white_re.sub(' ', output.strip())
output = output.decode('utf-8', 'ignore')
output = misc.strip_html(output)
return output

77
plugins/get.py Normal file

@ -0,0 +1,77 @@
#-*- coding: utf-8 -*-
# Copyright (C) 2011 by Guilherme Pinto Gonçalves, Ivan Sichmman Freitas
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from util import hook
import sys
import subprocess
from functools import partial
fortunes = {
'fortunes': 'fortunes',
'fortune': 'fortunes',
'quotes': 'literature',
'quote': 'literature',
'riddle': 'riddles',
'riddles': 'riddles',
'cookie': 'cookie',
'cookies': 'cookie',
'disclaimer': 'disclaimer',
'f': 'fortunes',
'q': 'literature',
'r': 'riddles'
}
# Use this later to replace the fortunes list workaround
def get_installed_fortunes():
try:
proc = subprocess.Popen(("/usr/bin/fortune", "-f"),
stderr = subprocess.PIPE)
except OSError:
return set()
return set(proc.stderr)
# Use this later to replace the fortunes list workaround
def get_fortune(db_name):
    "run fortune-mod against the given database and return its output lines (None on failure)"
    try:
        proc = subprocess.Popen(("fortune", "-a", db_name),
                                stderr=subprocess.PIPE,
                                stdout=subprocess.PIPE)
    except OSError:
        return None
    if proc.wait() != 0:
        return None
    return proc.stdout.read().splitlines()
@hook.command()
def get(inp, say=None):
    ".get <what> -- uses fortune-mod to get something. <what> can be riddle, quote or fortune"
    if inp not in fortunes:
        return get.__doc__
    lines = get_fortune(fortunes[inp])
    # retry a few times if the result is suspiciously short, rather than looping forever
    tries = 0
    while lines is not None and len(''.join(lines)) <= 5 and tries < 5:
        lines = get_fortune(fortunes[inp])
        tries += 1
    if lines is None:
        return "Fortune failed; is fortune-mod installed?"
    for line in lines:
        say(line.lstrip())

82
plugins/googlesearch.py Normal file

@ -0,0 +1,82 @@
import random
from util import hook, http
def api_get(kind, query):
url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \
'v=1.0&safe=off'
return http.get_json(url % kind, q=query)
@hook.command
def gis(inp):
'''.gis <term> -- returns first google image result (safesearch off)'''
parsed = api_get('images', inp)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for images: %d: %s' % (
parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'no images found'
return random.choice(parsed['responseData']['results'][:10]) \
['unescapedUrl'] # squares is dumb
@hook.command('g')
@hook.command
def google(inp):
'''.g/.google <query> -- returns first google search result'''
parsed = api_get('web', inp)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for pages: %d: %s' % (
parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'no results found'
result = parsed['responseData']['results'][0]
title = http.unescape(result['titleNoFormatting'])
content = http.unescape(result['content'])
if len(content) == 0:
content = "No description available"
else:
content = http.html.fromstring(content).text_content()
out = '%s -- \x02%s\x02: "%s"' % (result['unescapedUrl'], title, content)
out = ' '.join(out.split())
if len(out) > 300:
out = out[:out.rfind(' ')] + '..."'
return out
def googleno(inp):
parsed = api_get('web', inp)
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error searching for pages: %d: %s' % (
parsed['responseStatus'], ''))
if not parsed['responseData']['results']:
return 'no results found'
result = parsed['responseData']['results'][0]
title = http.unescape(result['titleNoFormatting'])
content = http.unescape(result['content'])
if len(content) == 0:
content = "No description available"
else:
content = http.html.fromstring(content).text_content()
out = '%s\x02: "%s"' % (title, content)
out = ' '.join(out.split())
if len(out) > 300:
out = out[:out.rfind(' ')] + '..."'
return out

34
plugins/gtime.py Normal file

@ -0,0 +1,34 @@
import re
from util import hook, http
from BeautifulSoup import BeautifulSoup
@hook.command("time")
def clock(inp, say=None):
'''.time <area> -- gets the time in <area>'''
white_re = re.compile(r'\s+')
tags_re = re.compile(r'<[^<]*?>')
page = http.get('http://www.google.com/search', q="time in " + inp)
soup = BeautifulSoup(page)
response = soup.find('td', {'style' : 'font-size:medium'})
if response is None:
return "Could not get the time for " + inp + "!"
output = response.renderContents()
output = ' '.join(output.splitlines())
output = output.replace("\xa0", ",")
output = white_re.sub(' ', output.strip())
output = tags_re.sub('\x02', output.strip())
output = output.decode('utf-8', 'ignore')
return output

28
plugins/hash.py Normal file

@ -0,0 +1,28 @@
import hashlib
from util import hook
@hook.command
def md5(inp):
    ".md5 <text> -- returns the md5 hash of <text>"
    return hashlib.md5(inp).hexdigest()
@hook.command
def sha1(inp):
    ".sha1 <text> -- returns the sha1 hash of <text>"
    return hashlib.sha1(inp).hexdigest()
@hook.command
def sha256(inp):
    ".sha256 <text> -- returns the sha256 hash of <text>"
    return hashlib.sha256(inp).hexdigest()
@hook.command
def sha512(inp):
    ".sha512 <text> -- returns the sha512 hash of <text>"
    return hashlib.sha512(inp).hexdigest()
@hook.command
def hash(inp):
".hash <text> -- returns hashes of <text>"
return ', '.join(x + ": " + getattr(hashlib, x)(inp).hexdigest()
for x in 'md5 sha1 sha256'.split())

51
plugins/help.py Normal file

@ -0,0 +1,51 @@
import re
from util import hook
# Standard automatic help command
@hook.command(autohelp=False)
def help(inp, input=None, bot=None, say=None, notice=None):
".help -- gives a list of commands/help for a command"
funcs = {}
disabled = bot.config.get('disabled_plugins', [])
disabled_comm = bot.config.get('disabled_commands', [])
for command, (func, args) in bot.commands.iteritems():
fn = re.match(r'^plugins.(.+).py$', func._filename)
if fn.group(1).lower() not in disabled:
if command not in disabled_comm:
if func.__doc__ is not None:
if func in funcs:
if len(funcs[func]) < len(command):
funcs[func] = command
else:
funcs[func] = command
hidden = ["part", "stfu", "kthx", "chnick", "join", "8ballnooxt"]
    commands = dict((value, key) for key, value in funcs.iteritems() if value not in hidden)
if not inp:
length = 0
out = ["",""]
well = []
for x in commands:
if x not in hidden:
well.append(x)
well.sort()
for x in well:
if len(out[0]) + len(str(x)) > 440:
out[1] += " " + str(x)
else:
out[0] += " " + str(x)
notice(out[0][1:])
if out[1]:
notice(out[1][1:])
else:
if inp in commands:
input.say(commands[inp].__doc__)

25
plugins/imdb.py Normal file

@ -0,0 +1,25 @@
# IMDb lookup plugin by Ghetto Wizard (2011).
from util import hook, http
@hook.command
def imdb(inp):
'''.imdb <movie> -- gets information about <movie> from IMDb'''
content = http.get_json("http://www.imdbapi.com/", t=inp)
if content['Response'] == 'Movie Not Found':
return 'movie not found'
elif content['Response'] == 'True':
content['URL'] = 'http://www.imdb.com/title/%(ID)s' % content
out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
if content['Runtime'] != 'N/A':
out += ' \x02%(Runtime)s\x02.'
if content['Rating'] != 'N/A' and content['Votes'] != 'N/A':
out += ' \x02%(Rating)s/10\x02 with \x02%(Votes)s\x02 votes.'
out += ' %(URL)s'
return out % content
else:
return 'unknown error'

50
plugins/insult.py Normal file

@ -0,0 +1,50 @@
from util import hook
import re
import random
insults = ["You are the son of a motherless ogre.",
"Your mother was a hamster and your father smelled of elderberries.",
"I once owned a dog that was smarter than you. ",
"Go climb a wall of dicks.",
"You fight like a dairy farmer.",
"I've spoken to apes more polite than you.",
"Go and boil your bottom! Son of a silly person! ",
"I fart in your general direction.",
"Go away or I shall taunt you a second time. ",
"Shouldn't you have a license for being that ugly?",
"Calling you an idiot would be an insult to all the stupid people.",
"Why don't you slip into something more comfortable...like a coma.",
"Well, they do say opposites attact...so I sincerely hope you meet somebody who is attractive, honest, intelligent, and cultured..",
"Are you always this stupid or are you just making a special effort today?",
"Yo momma so fat when she sits around the house she sits AROUND the house.",
"Yo momma so ugly she made an onion cry.",
"Is your name Maple Syrup? It should be, you sap.",
"Bite my shiny metal ass!",
"Up yours, meatbag.",
"Jam a bastard in it you crap!",
"Don't piss me off today, I'm running out of places to hide to bodies",
"Why don't you go outside and play hide and go fuck yourself",
"I'll use small words you're sure to understand, you warthog-faced buffoon.",
"You are a sad, strange little man, and you have my pity.",
"Sit your five dollar ass down before I make change.",
"What you've just said is one of the most insanely idiotic things I've ever heard. Everyone in this room is now dumber for having listened to it. May God have mercy on your soul.",
"Look up Idiot in the dictionary. Know what you'll find? The definition of the word IDIOT, which you are.",
"You're dumber than a bag of hammers.",
"Why don't you go back to your home on Whore Island?",
"If I had a dick this is when I'd tell you to suck it.",
"Go play in traffic.",
"The village called, they want their idiot back."]
@hook.command(autohelp=False)
def insult(inp, nick=None, say=None, input=None):
".insult [user] -- insult someone!"
msg = "(" + nick + ") " + random.choice(insults)
if re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()) and inp != "":
msg = "(@" + inp + ") " + random.choice(insults)
if inp == input.conn.nick.lower() or inp == "itself":
msg = "*stares at " + nick + "*"
say(msg)

53
plugins/lastfm.py Normal file

@ -0,0 +1,53 @@
from util import hook, http
api_key = ""
api_url = "http://ws.audioscrobbler.com/2.0/?format=json"
@hook.command
def lastfm(inp, nick='', say=None):
    ".lastfm [user] -- shows the track that [user] (or the caller's nick) last played on last.fm"
    if inp:
        user = inp
    else:
        user = nick
response = http.get_json(api_url, method="user.getrecenttracks",
api_key=api_key, user=user, limit=1)
if 'error' in response:
if inp: # specified a user name
return "error: %s" % response["message"]
else:
return "your nick is not a LastFM account. try '.lastfm username'."
tracks = response["recenttracks"]["track"]
if len(tracks) == 0:
return "no recent tracks for user %r found" % user
if type(tracks) == list:
# if the user is listening to something, the tracks entry is a list
# the first item is the current track
track = tracks[0]
status = 'current track'
elif type(tracks) == dict:
# otherwise, they aren't listening to anything right now, and
# the tracks entry is a dict representing the most recent track
track = tracks
status = 'last track'
else:
return "error parsing track listing"
title = track["name"]
album = track["album"]["#text"]
artist = track["artist"]["#text"]
ret = "\x02%s\x0F's %s - \x02%s\x0f" % (user, status, title)
if artist:
ret += " by \x02%s\x0f" % artist
if album:
ret += " on \x02%s\x0f" % album
say(ret)

36
plugins/location.py Normal file

@ -0,0 +1,36 @@
from util import hook
def find_location(ip):
import string
import urllib
api = "6ddac03a5a67a534045f59908e5c17fd68169609b453e3c6398823fff86a87c0"
response = urllib.urlopen("http://api.ipinfodb.com/v3/ip-city/?key="+api+"&ip="+ip).read()
response = response.split(";")
give = {}
give["country"] = response[4].title()
give["country_short"] = response[3].upper()
give["state"] = response[5].title()
give["city"] = response[6].title()
give["timezone"] = response[10].title()
return give
def timezone(ip):
time = find_location(ip)["timezone"]
time = time.replace(":",".")
time = time.replace(".00","")
return int(time)
@hook.command
def location(inp, say = None, me = None):
".location <ip> - Performs a GeoIP check on the ip given."
give = find_location(inp)
if give["country"] not in [""," ","-"," - "]:
if give["state"] == give["city"]:
localstring = give["city"]
else:
localstring = give["city"] + ", " + give["state"]
say("That IP comes from " + give["country"] + " (" + give["country_short"] + ")")
say("I think it's in " + localstring + " with a timezone of " + give["timezone"] + "GMT")
else:
say("Either that wasn't an IP or I cannot locate it in my database. :(")
return

106
plugins/log.py Normal file

@ -0,0 +1,106 @@
"""
log.py: written by Scaevolus 2009
"""
import os
import codecs
import time
import re
from util import hook
log_fds = {} # '%(net)s %(chan)s' : (filename, fd)
timestamp_format = '%H:%M:%S'
formats = {'PRIVMSG': '<%(nick)s> %(msg)s',
'PART': '-!- %(nick)s [%(user)s@%(host)s] has left %(chan)s',
'JOIN': '-!- %(nick)s [%(user)s@%(host)s] has joined %(param0)s',
'MODE': '-!- mode/%(chan)s [%(param_tail)s] by %(nick)s',
'KICK': '-!- %(param1)s was kicked from %(chan)s by %(nick)s [%(msg)s]',
'TOPIC': '-!- %(nick)s changed the topic of %(chan)s to: %(msg)s',
'QUIT': '-!- %(nick)s has quit [%(msg)s]',
'PING': '',
'NOTICE': ''
}
ctcp_formats = {'ACTION': '* %(nick)s %(ctcpmsg)s'}
irc_color_re = re.compile(r'(\x03(\d+,\d+|\d)|[\x0f\x02\x16\x1f])')
def get_log_filename(dir, server, chan):
return os.path.join(dir, 'log', gmtime('%Y'), server,
(gmtime('%%s.%m-%d.log') % chan).lower())
def gmtime(format):
return time.strftime(format, time.gmtime())
def beautify(input):
format = formats.get(input.command, '%(raw)s')
args = dict(input)
leng = len(args['paraml'])
for n, p in enumerate(args['paraml']):
args['param' + str(n)] = p
args['param_' + str(abs(n - leng))] = p
args['param_tail'] = ' '.join(args['paraml'][1:])
args['msg'] = irc_color_re.sub('', args['msg'])
if input.command == 'PRIVMSG' and input.msg.count('\x01') >= 2:
ctcp = input.msg.split('\x01', 2)[1].split(' ', 1)
if len(ctcp) == 1:
ctcp += ['']
args['ctcpcmd'], args['ctcpmsg'] = ctcp
format = ctcp_formats.get(args['ctcpcmd'],
'%(nick)s [%(user)s@%(host)s] requested unknown CTCP '
'%(ctcpcmd)s from %(chan)s: %(ctcpmsg)s')
return format % args
def get_log_fd(dir, server, chan):
fn = get_log_filename(dir, server, chan)
cache_key = '%s %s' % (server, chan)
filename, fd = log_fds.get(cache_key, ('', 0))
if fn != filename: # we need to open a file for writing
if fd != 0: # is a valid fd
fd.flush()
fd.close()
dir = os.path.split(fn)[0]
if not os.path.exists(dir):
os.makedirs(dir)
fd = codecs.open(fn, 'a', 'utf-8')
log_fds[cache_key] = (fn, fd)
return fd
@hook.singlethread
@hook.event('*')
def log(paraml, input=None, bot=None):
timestamp = gmtime(timestamp_format)
fd = get_log_fd(bot.persist_dir, input.server, 'raw')
fd.write(timestamp + ' ' + input.raw + '\n')
if input.command == 'QUIT': # these are temporary fixes until proper
input.chan = 'quit' # presence tracking is implemented
if input.command == 'NICK':
input.chan = 'nick'
beau = beautify(input)
if beau == '': # don't log this
return
if input.chan:
fd = get_log_fd(bot.persist_dir, input.server, input.chan)
fd.write(timestamp + ' ' + beau + '\n')
print timestamp, input.chan, beau.encode('utf8', 'ignore')
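To make the path layout concrete, this is roughly what get_log_filename() produces on the commit date (the persist dir and network name are made-up values):
print get_log_filename('/home/bot/persist', 'freenode', '#Bots')
# -> /home/bot/persist/log/2011/freenode/#bots.11-20.log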

36
plugins/mclogin.py Normal file

@ -0,0 +1,36 @@
from util import hook
import urllib
@hook.command(autohelp=False)
def mccheck(inp, input=None, notice=None):
    ".mccheck - Attempts to log in to minecraft"
    username = input.bot.config["api_keys"]["mc"][0]
    password = input.bot.config["api_keys"]["mc"][1]
    notice(username + " " + password)
login = urllib.urlopen("https://login.minecraft.net/?user="+username+"&password="+password+"&&version=13").read()
if username in login:
return "Attempting to connect to Minecraft login servers... Login servers appear to be online!"
else:
return "Attempting to connect to Minecraft login servers... Login servers appear to be offline :("
@hook.command
def haspaid(inp):
".haspaid <username> - Checks if a user has a premium Minecraft account"
login = urllib.urlopen("http://www.minecraft.net/haspaid.jsp?user=" + inp).read()
if "true" in login:
return "The user " + inp + " has a premium Minecraft account."
else:
return "The user " + inp + " either has not paid or is an unused nickname."
@hook.command
def mclogin(inp, say=None):
".mclogin <username> <password> - Attempts to log in to minecraft using the provided username and password, this is NOT logged."
inp = inp.split(" ")
username = inp[0]
password = inp[1]
say("Attempting to log in using " + username)
login = urllib.urlopen("https://login.minecraft.net/?user=" + username + "&password=" + password + "&&version=13").read()
if username in login:
return "I logged in with " + username
else:
return "I couldn't log in using " + username + ", either the password changed or minecraft auth is down :O"

37
plugins/mcping.py Normal file

@ -0,0 +1,37 @@
import socket
import struct
from util import hook
def get_info(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
sock.connect((host, port))
sock.send('\xfe')
response = sock.recv(1)
if response != '\xff':
return "Server gave invalid response: "+repr(response)
length = struct.unpack('!h', sock.recv(2))[0]
values = sock.recv(length*2).decode('utf-16be').split(u'\xa7')
sock.close()
return "%s - %d/%d players" % (values[0], int(values[1]), int(values[2]))
except:
return "Error pinging "+host+":"+str(port)+", is it up? double-check your address!"
@hook.command
def mcping(inp):
".mcping server[:port] - ping a minecraft server and show response."
inp = inp.strip().split(" ")[0]
if ":" in inp:
host, port = inp.split(":", 1)
try:
port = int(port)
except:
return "Invalid port!"
else:
host = inp
port = 25565
return get_info(host, port)

28
plugins/mem.py Normal file

@ -0,0 +1,28 @@
import os
import re
from util import hook
@hook.command(autohelp=False)
def mem(inp):
".mem -- returns bot's current memory usage -- linux/windows only"
if os.name == 'posix':
status_file = open("/proc/%d/status" % os.getpid()).read()
line_pairs = re.findall(r"^(\w+):\s*(.*)\s*$", status_file, re.M)
status = dict(line_pairs)
keys = 'VmSize VmLib VmData VmExe VmRSS VmStk'.split()
return ', '.join(key + ':' + status[key] for key in keys)
elif os.name == 'nt':
cmd = "tasklist /FI \"PID eq %s\" /FO CSV /NH" % os.getpid()
out = os.popen(cmd).read()
total = 0
for amount in re.findall(r'([,0-9]+) K', out):
total += int(amount.replace(',', ''))
return 'memory usage: %d kB' % total
return mem.__doc__
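On the Windows branch the plugin leans on tasklist's CSV output; a line typically looks like the example below (values invented), and the regex then sums every "N,NNN K" field after stripping the commas:
# "python.exe","3384","Console","1","52,300 K"
# re.findall(r'([,0-9]+) K', out) -> ['52,300'] -> 52300 kB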

135
plugins/metacritic.py Normal file

@ -0,0 +1,135 @@
# metacritic.com scraper
import re
from urllib2 import HTTPError
from util import hook, http
@hook.command('mc')
def metacritic(inp):
'.mc [all|movie|tv|album|x360|ps3|pc|ds|wii] <title> -- gets rating for'\
' <title> from metacritic on the specified medium'
# if the results suck, it's metacritic's fault
args = inp.strip()
game_platforms = ('x360', 'ps3', 'pc', 'ds', 'wii', '3ds', 'gba')
all_platforms = game_platforms + ('all', 'movie', 'tv', 'album')
try:
plat, title = args.split(' ', 1)
if plat not in all_platforms:
# raise the ValueError so that the except block catches it
# in this case, or in the case of the .split above raising the
# ValueError, we want the same thing to happen
raise ValueError
except ValueError:
plat = 'all'
title = args
cat = 'game' if plat in game_platforms else plat
title_safe = http.quote_plus(title)
url = 'http://www.metacritic.com/search/%s/%s/results' % (cat, title_safe)
try:
doc = http.get_html(url)
except HTTPError:
return 'error fetching results'
''' result format:
-- game result, with score
-- subsequent results are the same structure, without first_result class
<li class="result first_result">
<div class="result_type">
<strong>Game</strong>
<span class="platform">WII</span>
</div>
<div class="result_wrap">
<div class="basic_stats has_score">
<div class="main_stats">
<h3 class="product_title basic_stat">...</h3>
<div class="std_score">
<div class="score_wrap">
<span class="label">Metascore: </span>
<span class="data metascore score_favorable">87</span>
</div>
</div>
</div>
<div class="more_stats extended_stats">...</div>
</div>
</div>
</li>
-- other platforms are the same basic layout
-- if it doesn't have a score, there is no div.basic_score
-- the <div class="result_type"> changes content for non-games:
<div class="result_type"><strong>Movie</strong></div>
'''
# get the proper result element we want to pull data from
result = None
if not doc.find_class('query_results'):
return 'no results found'
# if they specified an invalid search term, the input box will be empty
if doc.get_element_by_id('search_term').value == '':
return 'invalid search term'
if plat not in game_platforms:
# for [all] results, or non-game platforms, get the first result
result = doc.find_class('result first_result')[0]
# find the platform, if it exists
result_type = result.find_class('result_type')
if result_type:
# if the result_type div has a platform div, get that one
platform_div = result_type[0].find_class('platform')
if platform_div:
plat = platform_div[0].text_content().strip()
else:
# otherwise, use the result_type text_content
plat = result_type[0].text_content().strip()
else:
# for games, we want to pull the first result with the correct
# platform
results = doc.find_class('result')
for res in results:
result_plat = res.find_class('platform')[0].text_content().strip()
if result_plat == plat.upper():
result = res
break
if not result:
return 'no results found'
# get the name, release date, and score from the result
product_title = result.find_class('product_title')[0]
name = product_title.text_content()
link = 'http://metacritic.com' + product_title.find('a').attrib['href']
try:
release = result.find_class('release_date')[0].\
find_class('data')[0].text_content()
# strip extra spaces out of the release date
release = re.sub(r'\s{2,}', ' ', release)
except IndexError:
release = None
try:
score = result.find_class('metascore')[0].text_content()
except IndexError:
score = None
return '[%s] %s - %s, %s -- %s' % (plat.upper(), name,
score or 'no score',
'release: %s' % release if release else 'unreleased',
link)

56
plugins/misc.py Normal file

@ -0,0 +1,56 @@
import re
import socket
import subprocess
import time
from util import hook, http
socket.setdefaulttimeout(10) # global setting
#autorejoin channels
#@hook.event('KICK')
#def rejoin(paraml, conn=None):
# if paraml[1] == conn.nick:
# if paraml[0].lower() in conn.channels:
# conn.join(paraml[0])
#join channels when invited
@hook.event('INVITE')
def invite(paraml, conn=None):
conn.join(paraml[-1])
@hook.event('004')
def onjoin(paraml, conn=None, bot=None):
# identify to services
nickserv_password = conn.conf.get('nickserv_password', '')
nickserv_name = conn.conf.get('nickserv_name', 'nickserv')
nickserv_command = conn.conf.get('nickserv_command', 'IDENTIFY %s')
if nickserv_password:
if nickserv_password in bot.config['censored_strings']:
bot.config['censored_strings'].remove(nickserv_password)
conn.msg(nickserv_name, nickserv_command % nickserv_password)
bot.config['censored_strings'].append(nickserv_password)
time.sleep(1)
# set mode on self
mode = conn.conf.get('mode')
if mode:
conn.cmd('MODE', [conn.nick, mode])
# join channels
for channel in conn.channels:
conn.join(channel)
time.sleep(1) # don't flood JOINs
# set user-agent
http.ua_skybot = 'CloudBot'
@hook.regex(r'^\x01VERSION\x01$')
def version(inp, notice=None):
    notice('\x01VERSION CloudBot\x01')
http.ua_skybot = 'CloudBot'

183
plugins/mtg.py Normal file

@ -0,0 +1,183 @@
import re
from util import hook, http
@hook.command
def mtg(inp):
".mtg <name> -- gets information about Magic the Gathering card <name>"
url = 'http://magiccards.info/query?v=card&s=cname'
h = http.get_html(url, q=inp)
name = h.find('body/table/tr/td/span/a')
if name is None:
return "no cards found"
card = name.getparent().getparent().getparent()
type = card.find('td/p').text.replace('\n', '')
# this is ugly
text = http.html.tostring(card.xpath("//p[@class='ctext']/b")[0])
text = text.replace('<br>', '$')
text = http.html.fromstring(text).text_content()
text = re.sub(r'(\w+\s*)\$+(\s*\w+)', r'\1. \2', text)
text = text.replace('$', ' ')
text = re.sub(r'\(.*?\)', '', text) # strip parenthetical explanations
text = re.sub(r'\.(\S)', r'. \1', text) # fix spacing
printings = card.find('td/small').text_content()
printings = re.search(r'Editions:(.*)Languages:', printings).group(1)
printings = re.findall(r'\s*(.+?(?: \([^)]+\))*) \((.*?)\)',
' '.join(printings.split()))
printing_out = ', '.join('%s (%s)' % (set_abbrevs.get(x[0], x[0]),
rarity_abbrevs.get(x[1], x[1]))
for x in printings)
name.make_links_absolute(base_url=url)
link = name.attrib['href']
name = name.text_content().strip()
type = type.strip()
text = ' '.join(text.split())
return ' | '.join((name, type, text, printing_out, link))
set_abbrevs = {
'15th Anniversary': '15ANN',
'APAC Junior Series': 'AJS',
'Alara Reborn': 'ARB',
'Alliances': 'AI',
'Anthologies': 'AT',
'Antiquities': 'AQ',
'Apocalypse': 'AP',
'Arabian Nights': 'AN',
'Arena League': 'ARENA',
'Asia Pacific Land Program': 'APAC',
'Battle Royale': 'BR',
'Battle Royale Box Set': 'BRB',
'Beatdown': 'BTD',
'Beatdown Box Set': 'BTD',
'Betrayers of Kamigawa': 'BOK',
'Celebration Cards': 'UQC',
'Champions of Kamigawa': 'CHK',
'Champs': 'CP',
'Chronicles': 'CH',
'Classic Sixth Edition': '6E',
'Coldsnap': 'CS',
'Coldsnap Theme Decks': 'CSTD',
'Conflux': 'CFX',
'Core Set - Eighth Edition': '8E',
'Core Set - Ninth Edition': '9E',
'Darksteel': 'DS',
'Deckmasters': 'DM',
'Dissension': 'DI',
'Dragon Con': 'DRC',
'Duel Decks: Divine vs. Demonic': 'DVD',
'Duel Decks: Elves vs. Goblins': 'EVG',
'Duel Decks: Garruk vs. Liliana': 'GVL',
'Duel Decks: Jace vs. Chandra': 'JVC',
'Eighth Edition': '8ED',
'Eighth Edition Box Set': '8EB',
'European Land Program': 'EURO',
'Eventide': 'EVE',
'Exodus': 'EX',
'Fallen Empires': 'FE',
'Fifth Dawn': '5DN',
'Fifth Edition': '5E',
'Fourth Edition': '4E',
'Friday Night Magic': 'FNMP',
'From the Vault: Dragons': 'FVD',
'From the Vault: Exiled': 'FVE',
'Future Sight': 'FUT',
'Gateway': 'GRC',
'Grand Prix': 'GPX',
'Guildpact': 'GP',
'Guru': 'GURU',
'Happy Holidays': 'HHO',
'Homelands': 'HL',
'Ice Age': 'IA',
'Introductory Two-Player Set': 'ITP',
'Invasion': 'IN',
'Judge Gift Program': 'JR',
'Judgment': 'JU',
'Junior Series': 'JSR',
'Legend Membership': 'DCILM',
'Legends': 'LG',
'Legions': 'LE',
'Limited Edition (Alpha)': 'LEA',
'Limited Edition (Beta)': 'LEB',
'Limited Edition Alpha': 'LEA',
'Limited Edition Beta': 'LEB',
'Lorwyn': 'LW',
'MTGO Masters Edition': 'MED',
'MTGO Masters Edition II': 'ME2',
'MTGO Masters Edition III': 'ME3',
'Magic 2010': 'M10',
'Magic Game Day Cards': 'MGDC',
'Magic Player Rewards': 'MPRP',
'Magic Scholarship Series': 'MSS',
'Magic: The Gathering Launch Parties': 'MLP',
'Media Inserts': 'MBP',
'Mercadian Masques': 'MM',
'Mirage': 'MR',
'Mirrodin': 'MI',
'Morningtide': 'MT',
'Multiverse Gift Box Cards': 'MGBC',
'Nemesis': 'NE',
'Ninth Edition Box Set': '9EB',
'Odyssey': 'OD',
'Onslaught': 'ON',
'Planar Chaos': 'PC',
'Planechase': 'PCH',
'Planeshift': 'PS',
'Portal': 'PO',
'Portal Demogame': 'POT',
'Portal Second Age': 'PO2',
'Portal Three Kingdoms': 'P3K',
'Premium Deck Series: Slivers': 'PDS',
'Prerelease Events': 'PTC',
'Pro Tour': 'PRO',
'Prophecy': 'PR',
'Ravnica: City of Guilds': 'RAV',
'Release Events': 'REP',
'Revised Edition': 'RV',
'Saviors of Kamigawa': 'SOK',
'Scourge': 'SC',
'Seventh Edition': '7E',
'Shadowmoor': 'SHM',
'Shards of Alara': 'ALA',
'Starter': 'ST',
'Starter 1999': 'S99',
'Starter 2000 Box Set': 'ST2K',
'Stronghold': 'SH',
'Summer of Magic': 'SOM',
'Super Series': 'SUS',
'Tempest': 'TP',
'Tenth Edition': '10E',
'The Dark': 'DK',
'Time Spiral': 'TS',
'Time Spiral Timeshifted': 'TSTS',
'Torment': 'TR',
'Two-Headed Giant Tournament': 'THGT',
'Unglued': 'UG',
'Unhinged': 'UH',
'Unhinged Alternate Foils': 'UHAA',
'Unlimited Edition': 'UN',
"Urza's Destiny": 'UD',
"Urza's Legacy": 'UL',
"Urza's Saga": 'US',
'Visions': 'VI',
'Weatherlight': 'WL',
'Worlds': 'WRL',
'WotC Online Store': 'WOTC',
'Zendikar': 'ZEN'}
rarity_abbrevs = {
'Land': 'L',
'Common': 'C',
'Uncommon': 'UC',
'Rare': 'R',
'Special': 'S',
'Mythic Rare': 'MR'}

73
plugins/munge.py Normal file

@ -0,0 +1,73 @@
# -*- coding: utf-8 -*-
from util import hook
@hook.command
def munge(inp, munge_count=0):
".munge <text> -- munges up the given text"
reps = 0
for n in xrange(len(inp)):
rep = character_replacements.get(inp[n])
if rep:
inp = inp[:n] + rep.decode('utf8') + inp[n + 1:]
reps += 1
if reps == munge_count:
break
return inp
character_replacements = {
'a': 'ä',
# 'b': 'Б',
'c': 'ċ',
'd': 'đ',
'e': 'ë',
'f': 'ƒ',
'g': 'ġ',
'h': 'ħ',
'i': 'í',
'j': 'ĵ',
'k': 'ķ',
'l': 'ĺ',
# 'm': 'ṁ',
'n': 'ñ',
'o': 'ö',
'p': 'ρ',
# 'q': 'ʠ',
'r': 'ŗ',
's': 'š',
't': 'ţ',
'u': 'ü',
# 'v': '',
'w': 'ω',
'x': 'χ',
'y': 'ÿ',
'z': 'ź',
'A': 'Å',
'B': 'Β',
'C': 'Ç',
'D': 'Ď',
'E': 'Ē',
# 'F': 'Ḟ',
'G': 'Ġ',
'H': 'Ħ',
'I': 'Í',
'J': 'Ĵ',
'K': 'Ķ',
'L': 'Ĺ',
'M': 'Μ',
'N': 'Ν',
'O': 'Ö',
'P': 'Р',
# 'Q': '',
'R': 'Ŗ',
'S': 'Š',
'T': 'Ţ',
'U': 'Ů',
# 'V': 'Ṿ',
'W': 'Ŵ',
'X': 'Χ',
'Y': '',
'Z': 'Ż'}

58
plugins/password.py Normal file

@ -0,0 +1,58 @@
# Password generation code by <TheNoodle>
from util import hook
import string
import random
def gen_password(types):
#Password Generator - The Noodle http://bowlofnoodles.net
okay = []
#find the length needed for the password
numb = types.split(" ")
for x in numb[0]:
        # if any errors are found, default to 10
if x not in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
numb[0] = 10
length = int(numb[0])
needs_def = 0
#alpha characters
if "alpha" in types or "letter" in types:
for x in string.ascii_lowercase:
okay.append(x)
#adds capital characters if not told not to
if "no caps" not in types:
for x in string.ascii_uppercase:
okay.append(x)
else:
needs_def = 1
#adds numbers
if "numeric" in types or "numbers" in types:
for x in range(0,10):
okay.append(str(x))
else:
needs_def = 1
#adds symbols
if "symbols" in types:
sym = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '=', '_', '+', '[', ']', '{', '}', '\\', '|', ';', ':', "'", '.', '>', ',', '<', '/', '?', '`', '~','"']
for x in sym:
okay.append(x)
else:
needs_def = 1
    # defaults to a lowercase alpha password if no arguments are found
if needs_def == 1:
for x in string.ascii_lowercase:
okay.append(x)
password = ""
#generates password
for x in range(length):
password = password + random.choice(okay)
return password
@hook.command
def password(inp, notice=None):
".password <length> [types] -- generates a password. types can include 'alpha', 'no caps', 'numeric', 'symbols' or any combination of the types, eg. 'numbers symbols'"
if inp == "penis":
return "Unable to process request, input too short"
notice(gen_password(inp))
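A rough illustration of how gen_password() reads its argument string (outputs are random; the comments describe the character pool the code selects):
print gen_password("16 alpha numeric symbols")   # 16 chars drawn from letters, digits and symbols
print gen_password("10 alpha no caps")           # 10 lowercase letters
print gen_password("junk")                       # unparseable length falls back to 10 lowercase letters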

26
plugins/potato.py Normal file

@ -0,0 +1,26 @@
from util import hook
import re
import random
potatoeslist = ['AC Belmont', 'AC Blue Pride', 'AC Brador', 'AC Chaleur', 'AC Domino', 'AC Dubuc', 'AC Glacier Chip', 'AC Maple Gold', 'AC Novachip', 'AC Peregrine Red', 'AC Ptarmigan', 'AC Red Island', 'AC Saguenor', 'AC Stampede Russet', 'AC Sunbury', 'Abeille', 'Abnaki', 'Acadia', 'Acadia Russet', 'Accent', 'Adirondack Blue', 'Adirondack Red', 'Adora', 'Agria', 'All Blue', 'All Red', 'Alpha', 'Alta Russet', 'Alturas Russet', 'Amandine', 'Amisk', 'Andover', 'Anoka', 'Anson', 'Aquilon', 'Arran Consul', 'Asterix', 'Atlantic', 'Austrian Crescent', 'Avalanche', 'Banana', 'Bannock Russet', 'Batoche', 'BeRus', 'Belle De Fonteney', 'Belleisle', 'Bintje', 'Blossom', 'Blue Christie', 'Blue Mac', 'Brigus', 'Brise du Nord', 'Butte', 'Butterfinger', 'Caesar', 'CalWhite', 'CalRed', 'Caribe', 'Carlingford', 'Carlton', 'Carola', 'Cascade', 'Castile', 'Centennial Russet', 'Century Russet', 'Charlotte', 'Cherie', 'Cherokee', 'Cherry Red', 'Chieftain', 'Chipeta', 'Coastal Russet', 'Colorado Rose', 'Concurrent', 'Conestoga', 'Cowhorn', 'Crestone Russet', 'Crispin', 'Cupids', 'Daisy Gold', 'Dakota Pearl', 'Defender', 'Delikat', 'Denali', 'Desiree', 'Divina', 'Dundrod', 'Durango Red', 'Early Rose', 'Elba', 'Envol', 'Epicure', 'Eramosa', 'Estima', 'Eva', 'Fabula', 'Fambo', 'Fremont Russet', 'French Fingerling', 'Frontier Russet', 'Fundy', 'Garnet Chile', 'Gem Russet', 'GemStar Russet', 'Gemchip', 'German Butterball', 'Gigant', 'Goldrush', 'Granola', 'Green Mountain', 'Haida', 'Hertha', 'Hilite Russet', 'Huckleberry', 'Hunter', 'Huron', 'IdaRose', 'Innovator', 'Irish Cobbler', 'Island Sunshine', 'Ivory Crisp', 'Jacqueline Lee', 'Jemseg', 'Kanona', 'Katahdin', 'Kennebec', "Kerr's Pink", 'Keswick', 'Keuka Gold', 'Keystone Russet', 'King Edward VII', 'Kipfel', 'Klamath Russet', 'Krantz', 'LaRatte', 'Lady Rosetta', 'Latona', 'Lemhi Russet', 'Liberator', 'Lili', 'MaineChip', 'Marfona', 'Maris Bard', 'Maris Piper', 'Matilda', 'Mazama', 'McIntyre', 'Michigan Purple', 'Millenium Russet', 'Mirton Pearl', 'Modoc', 'Mondial', 'Monona', 'Morene', 'Morning Gold', 'Mouraska', 'Navan', 'Nicola', 'Nipigon', 'Niska', 'Nooksack', 'NorValley', 'Norchip', 'Nordonna', 'Norgold Russet', 'Norking Russet', 'Norland', 'Norwis', 'Obelix', 'Ozette', 'Peanut', 'Penta', 'Peribonka', 'Peruvian Purple', 'Pike', 'Pink Pearl', 'Prospect', 'Pungo', 'Purple Majesty', 'Purple Viking', 'Ranger Russet', 'Reba', 'Red Cloud', 'Red Gold', 'Red La Soda', 'Red Pontiac', 'Red Ruby', 'Red Thumb', 'Redsen', 'Rocket', 'Rose Finn Apple', 'Rose Gold', 'Roselys', 'Rote Erstling', 'Ruby Crescent', 'Russet Burbank', 'Russet Legend', 'Russet Norkotah', 'Russet Nugget', 'Russian Banana', 'Saginaw Gold', 'Sangre', 'Santé', 'Satina', 'Saxon', 'Sebago', 'Shepody', 'Sierra', 'Silverton Russet', 'Simcoe', 'Snowden', 'Spunta', "St. John's", 'Summit Russet', 'Sunrise', 'Superior', 'Symfonia', 'Tolaas', 'Trent', 'True Blue', 'Ulla', 'Umatilla Russet', 'Valisa', 'Van Gogh', 'Viking', 'Wallowa Russet', 'Warba', 'Western Russet', 'White Rose', 'Willamette', 'Winema', 'Yellow Finn', 'Yukon Gold']
@hook.command
def potato(inp, me = None, input=None):
".potato <user> - makes the user a tasty little potato"
inp = inp.strip()
if not re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()):
return "I cant make a tasty potato for that user!"
potatoa = random.choice(potatoeslist)
size = random.choice(['small', 'little', 'mid-sized', 'medium-sized', 'large', 'gigantic'])
flavor = random.choice(['tasty', 'delectable', 'delicious', 'yummy', 'toothsome', 'scrumptious', 'luscious'])
cook = random.choice(['bakes', 'fries', 'boils', 'microwaves'])
me(cook + " a " + flavor + " " + size + " " + potatoa + " potato for " + inp + "!")
@hook.command(autohelp=False)
def potatoes(inp, me = None, input=None):
potatoa = random.choice(potatoeslist)
count = str(len(potatoeslist))
me("contemplates eating a "+potatoa+" potato, one of the "+count+" types of potatoes that I know about.")

View file

@ -0,0 +1,98 @@
#-----------------------------------------------------------------
# pycparser: cdecl.py
#
# Example of the CDECL tool using pycparser. CDECL "explains"
# C type declarations in plain English.
#
# The AST generated by pycparser from the given declaration is
# traversed recursively to build the explanation.
# Note that the declaration must be a valid external declaration
# in C. All the types used in it must be defined with typedef,
# or parsing will fail. The definition can be arbitrary, it isn't
# really used - but pycparser must know which tokens are types.
#
# For example:
#
# 'typedef int Node; const Node* (*ar)[10];'
# =>
# ar is a pointer to array[10] of pointer to const Node
#
# Copyright (C) 2008, Eli Bendersky
# License: LGPL
#-----------------------------------------------------------------
import sys
from pycparser import c_parser, c_ast
def explain_c_declaration(c_decl):
""" Parses the declaration in c_decl and returns a text
explanation as a string.
The last external node of the string is used, to allow
earlier typedefs for used types.
"""
parser = c_parser.CParser()
node = parser.parse(c_decl, filename='<stdin>')
if ( not isinstance(node, c_ast.FileAST) or
not isinstance(node.ext[-1], c_ast.Decl)):
return "Last external node is invalid type"
return _explain_decl_node(node.ext[-1])
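# For example, following the header comment above (the typedef must travel
# with the declaration being explained):
#
#   explain_c_declaration('typedef int Node; const Node* (*ar)[10];')
#   => 'ar is a pointer to array[10] of pointer to const Node'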
def _explain_decl_node(decl_node):
""" Receives a c_ast.Decl note and returns its explanation in
English.
"""
#~ print decl_node.show()
storage = ' '.join(decl_node.storage) + ' ' if decl_node.storage else ''
return (decl_node.name +
" is a " +
storage +
_explain_type(decl_node.type))
def _explain_type(decl):
""" Recursively explains a type decl node
"""
typ = type(decl)
if typ == c_ast.TypeDecl:
quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
return quals + _explain_type(decl.type)
elif typ == c_ast.Typename or typ == c_ast.Decl:
return _explain_type(decl.type)
elif typ == c_ast.IdentifierType:
return ' '.join(decl.names)
elif typ == c_ast.PtrDecl:
quals = ' '.join(decl.quals) + ' ' if decl.quals else ''
return quals + 'pointer to ' + _explain_type(decl.type)
elif typ == c_ast.ArrayDecl:
arr = 'array'
if decl.dim: arr += '[%s]' % decl.dim.value
return arr + " of " + _explain_type(decl.type)
elif typ == c_ast.FuncDecl:
if decl.args:
params = [_explain_type(param) for param in decl.args.params]
args = ', '.join(params)
else:
args = ''
return ('function(%s) returning ' % (args) +
_explain_type(decl.type))
if __name__ == "__main__":
if len(sys.argv) > 1:
c_decl = sys.argv[1]
else:
c_decl = "char *(*(**foo[][8])())[];"
print "Explaining the declaration:", c_decl
print "\n", explain_c_declaration(c_decl)

View file

@ -0,0 +1,9 @@
# pycparser.lextab.py. This file automatically created by PLY (version 3.3). Don't edit!
_tabversion = '3.3'
_lextokens = {'VOID': 1, 'LBRACKET': 1, 'WCHAR_CONST': 1, 'FLOAT_CONST': 1, 'MINUS': 1, 'RPAREN': 1, 'LONG': 1, 'PLUS': 1, 'ELLIPSIS': 1, 'GT': 1, 'GOTO': 1, 'ENUM': 1, 'PERIOD': 1, 'GE': 1, 'INT_CONST_DEC': 1, 'ARROW': 1, 'DOUBLE': 1, 'MINUSEQUAL': 1, 'INT_CONST_OCT': 1, 'TIMESEQUAL': 1, 'OR': 1, 'SHORT': 1, 'RETURN': 1, 'RSHIFTEQUAL': 1, 'STATIC': 1, 'SIZEOF': 1, 'UNSIGNED': 1, 'UNION': 1, 'COLON': 1, 'WSTRING_LITERAL': 1, 'DIVIDE': 1, 'FOR': 1, 'PLUSPLUS': 1, 'EQUALS': 1, 'ELSE': 1, 'EQ': 1, 'AND': 1, 'TYPEID': 1, 'LBRACE': 1, 'PPHASH': 1, 'INT': 1, 'SIGNED': 1, 'CONTINUE': 1, 'NOT': 1, 'OREQUAL': 1, 'MOD': 1, 'RSHIFT': 1, 'DEFAULT': 1, 'CHAR': 1, 'WHILE': 1, 'DIVEQUAL': 1, 'EXTERN': 1, 'CASE': 1, 'LAND': 1, 'REGISTER': 1, 'MODEQUAL': 1, 'NE': 1, 'SWITCH': 1, 'INT_CONST_HEX': 1, 'PLUSEQUAL': 1, 'STRUCT': 1, 'CONDOP': 1, 'BREAK': 1, 'VOLATILE': 1, 'ANDEQUAL': 1, 'DO': 1, 'LNOT': 1, 'CONST': 1, 'LOR': 1, 'CHAR_CONST': 1, 'LSHIFT': 1, 'RBRACE': 1, 'LE': 1, 'SEMI': 1, 'LT': 1, 'COMMA': 1, 'TYPEDEF': 1, 'XOR': 1, 'AUTO': 1, 'TIMES': 1, 'LPAREN': 1, 'MINUSMINUS': 1, 'ID': 1, 'IF': 1, 'STRING_LITERAL': 1, 'FLOAT': 1, 'XOREQUAL': 1, 'LSHIFTEQUAL': 1, 'RBRACKET': 1}
_lexreflags = 0
_lexliterals = ''
_lexstateinfo = {'ppline': 'exclusive', 'INITIAL': 'inclusive'}
_lexstatere = {'ppline': [('(?P<t_ppline_FILENAME>"([^"\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))*")|(?P<t_ppline_LINE_NUMBER>(0(([uU][lL])|([lL][uU])|[uU]|[lL])?)|([1-9][0-9]*(([uU][lL])|([lL][uU])|[uU]|[lL])?))|(?P<t_ppline_NEWLINE>\\n)|(?P<t_ppline_PPLINE>line)', [None, ('t_ppline_FILENAME', 'FILENAME'), None, None, None, None, None, None, ('t_ppline_LINE_NUMBER', 'LINE_NUMBER'), None, None, None, None, None, None, None, None, ('t_ppline_NEWLINE', 'NEWLINE'), ('t_ppline_PPLINE', 'PPLINE')])], 'INITIAL': [('(?P<t_PPHASH>[ \\t]*\\#)|(?P<t_NEWLINE>\\n+)|(?P<t_FLOAT_CONST>((((([0-9]*\\.[0-9]+)|([0-9]+\\.))([eE][-+]?[0-9]+)?)|([0-9]+([eE][-+]?[0-9]+)))[FfLl]?))|(?P<t_INT_CONST_HEX>0[xX][0-9a-fA-F]+(([uU][lL])|([lL][uU])|[uU]|[lL])?)|(?P<t_BAD_CONST_OCT>0[0-7]*[89])|(?P<t_INT_CONST_OCT>0[0-7]*(([uU][lL])|([lL][uU])|[uU]|[lL])?)|(?P<t_INT_CONST_DEC>(0(([uU][lL])|([lL][uU])|[uU]|[lL])?)|([1-9][0-9]*(([uU][lL])|([lL][uU])|[uU]|[lL])?))|(?P<t_CHAR_CONST>\'([^\'\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))\')|(?P<t_WCHAR_CONST>L\'([^\'\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))\')|(?P<t_UNMATCHED_QUOTE>(\'([^\'\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))*\\n)|(\'([^\'\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))*$))|(?P<t_BAD_CHAR_CONST>(\'([^\'\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))[^\'\n]+\')|(\'\')|(\'([\\\\][^a-zA-Z\\\\?\'"x0-7])[^\'\\n]*\'))|(?P<t_WSTRING_LITERAL>L"([^"\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))*")|(?P<t_BAD_STRING_LITERAL>"([^"\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))*([\\\\][^a-zA-Z\\\\?\'"x0-7])([^"\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))*")|(?P<t_ID>[a-zA-Z_][0-9a-zA-Z_]*)|(?P<t_STRING_LITERAL>"([^"\\\\\\n]|(\\\\(([a-zA-Z\\\\?\'"])|([0-7]{1,3})|(x[0-9a-fA-F]+))))*")', [None, ('t_PPHASH', 'PPHASH'), ('t_NEWLINE', 'NEWLINE'), ('t_FLOAT_CONST', 'FLOAT_CONST'), None, None, None, None, None, None, None, None, None, ('t_INT_CONST_HEX', 'INT_CONST_HEX'), None, None, None, ('t_BAD_CONST_OCT', 'BAD_CONST_OCT'), ('t_INT_CONST_OCT', 'INT_CONST_OCT'), None, None, None, ('t_INT_CONST_DEC', 'INT_CONST_DEC'), None, None, None, None, None, None, None, None, ('t_CHAR_CONST', 'CHAR_CONST'), None, None, None, None, None, None, ('t_WCHAR_CONST', 'WCHAR_CONST'), None, None, None, None, None, None, ('t_UNMATCHED_QUOTE', 'UNMATCHED_QUOTE'), None, None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_BAD_CHAR_CONST', 'BAD_CHAR_CONST'), None, None, None, None, None, None, None, None, None, None, ('t_WSTRING_LITERAL', 'WSTRING_LITERAL'), None, None, None, None, None, None, ('t_BAD_STRING_LITERAL', 'BAD_STRING_LITERAL'), None, None, None, None, None, None, None, None, None, None, None, None, None, ('t_ID', 'ID'), (None, 'STRING_LITERAL')]), ('(?P<t_ELLIPSIS>\\.\\.\\.)|(?P<t_PLUSPLUS>\\+\\+)|(?P<t_LOR>\\|\\|)|(?P<t_OREQUAL>\\|=)|(?P<t_LSHIFTEQUAL><<=)|(?P<t_RSHIFTEQUAL>>>=)|(?P<t_TIMESEQUAL>\\*=)|(?P<t_PLUSEQUAL>\\+=)|(?P<t_XOREQUAL>^=)|(?P<t_PLUS>\\+)|(?P<t_MODEQUAL>%=)|(?P<t_LBRACE>\\{)|(?P<t_DIVEQUAL>/=)|(?P<t_RBRACKET>\\])|(?P<t_CONDOP>\\?)', [None, (None, 'ELLIPSIS'), (None, 'PLUSPLUS'), (None, 'LOR'), (None, 'OREQUAL'), (None, 'LSHIFTEQUAL'), (None, 'RSHIFTEQUAL'), (None, 'TIMESEQUAL'), (None, 'PLUSEQUAL'), (None, 'XOREQUAL'), (None, 'PLUS'), (None, 'MODEQUAL'), (None, 'LBRACE'), (None, 'DIVEQUAL'), (None, 'RBRACKET'), (None, 'CONDOP')]), 
('(?P<t_XOR>\\^)|(?P<t_LSHIFT><<)|(?P<t_LE><=)|(?P<t_LPAREN>\\()|(?P<t_ARROW>->)|(?P<t_EQ>==)|(?P<t_RBRACE>\\})|(?P<t_NE>!=)|(?P<t_MINUSMINUS>--)|(?P<t_OR>\\|)|(?P<t_TIMES>\\*)|(?P<t_LBRACKET>\\[)|(?P<t_GE>>=)|(?P<t_RPAREN>\\))|(?P<t_LAND>&&)|(?P<t_RSHIFT>>>)|(?P<t_ANDEQUAL>&=)|(?P<t_MINUSEQUAL>-=)|(?P<t_PERIOD>\\.)|(?P<t_EQUALS>=)|(?P<t_LT><)|(?P<t_COMMA>,)|(?P<t_DIVIDE>/)|(?P<t_AND>&)|(?P<t_MOD>%)|(?P<t_SEMI>;)|(?P<t_MINUS>-)|(?P<t_GT>>)|(?P<t_COLON>:)|(?P<t_NOT>~)|(?P<t_LNOT>!)', [None, (None, 'XOR'), (None, 'LSHIFT'), (None, 'LE'), (None, 'LPAREN'), (None, 'ARROW'), (None, 'EQ'), (None, 'RBRACE'), (None, 'NE'), (None, 'MINUSMINUS'), (None, 'OR'), (None, 'TIMES'), (None, 'LBRACKET'), (None, 'GE'), (None, 'RPAREN'), (None, 'LAND'), (None, 'RSHIFT'), (None, 'ANDEQUAL'), (None, 'MINUSEQUAL'), (None, 'PERIOD'), (None, 'EQUALS'), (None, 'LT'), (None, 'COMMA'), (None, 'DIVIDE'), (None, 'AND'), (None, 'MOD'), (None, 'SEMI'), (None, 'MINUS'), (None, 'GT'), (None, 'COLON'), (None, 'NOT'), (None, 'LNOT')])]}
_lexstateignore = {'ppline': ' \t', 'INITIAL': ' \t'}
_lexstateerrorf = {'ppline': 't_ppline_error', 'INITIAL': 't_error'}

View file

@ -0,0 +1,75 @@
#-----------------------------------------------------------------
# pycparser: __init__.py
#
# This package file exports some convenience functions for
# interacting with pycparser
#
# Copyright (C) 2008-2009, Eli Bendersky
# License: LGPL
#-----------------------------------------------------------------
__all__ = ['c_lexer', 'c_parser', 'c_ast']
__version__ = '1.05'
from subprocess import Popen, PIPE
from types import ListType
from c_parser import CParser
def parse_file( filename, use_cpp=False,
cpp_path='cpp', cpp_args=''):
""" Parse a C file using pycparser.
filename:
Name of the file you want to parse.
use_cpp:
Set to True if you want to execute the C pre-processor
on the file prior to parsing it.
cpp_path:
If use_cpp is True, this is the path to 'cpp' on your
system. If no path is provided, it attempts to just
execute 'cpp', so it must be in your PATH.
cpp_args:
If use_cpp is True, set this to the command line
arguments strings to cpp. Be careful with quotes -
it's best to pass a raw string (r'') here.
For example:
r'-I../utils/fake_libc_include'
If several arguments are required, pass a list of
strings.
When successful, an AST is returned. ParseError can be
thrown if the file doesn't parse successfully.
Errors from cpp will be printed out.
"""
if use_cpp:
path_list = [cpp_path]
if isinstance(cpp_args, ListType):
path_list += cpp_args
elif cpp_args != '':
path_list += [cpp_args]
path_list += [filename]
# Note the use of universal_newlines to treat all newlines
# as \n for Python's purpose
#
pipe = Popen( path_list,
stdout=PIPE,
universal_newlines=True)
text = pipe.communicate()[0]
else:
text = open(filename).read()
parser = CParser()
return parser.parse(text, filename)
if __name__ == "__main__":
pass
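# A minimal usage sketch based on the parse_file docstring above; the file
# name and include path below are placeholders, not part of this package.
#
#   ast = parse_file('example.c', use_cpp=True,
#                    cpp_path='cpp',
#                    cpp_args=r'-I../utils/fake_libc_include')
#   ast.show()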

File diff suppressed because it is too large

View file

@ -0,0 +1,443 @@
#-----------------------------------------------------------------
# pycparser: clex.py
#
# CLexer class: lexer for the C language
#
# Copyright (C) 2008, Eli Bendersky
# License: LGPL
#-----------------------------------------------------------------
import re
import sys
import ply.lex
from ply.lex import TOKEN
class CLexer(object):
""" A lexer for the C language. After building it, set the
input text with input(), and call token() to get new
tokens.
The public attribute filename can be set to an initial
        filename, but the lexer will update it upon #line
directives.
"""
def __init__(self, error_func, type_lookup_func):
""" Create a new Lexer.
error_func:
An error function. Will be called with an error
message, line and column as arguments, in case of
an error during lexing.
type_lookup_func:
A type lookup function. Given a string, it must
return True IFF this string is a name of a type
that was defined with a typedef earlier.
"""
self.error_func = error_func
self.type_lookup_func = type_lookup_func
self.filename = ''
# Allow either "# line" or "# <num>" to support GCC's
# cpp output
#
self.line_pattern = re.compile('([ \t]*line\W)|([ \t]*\d+)')
def build(self, **kwargs):
""" Builds the lexer from the specification. Must be
called after the lexer object is created.
This method exists separately, because the PLY
manual warns against calling lex.lex inside
__init__
"""
self.lexer = ply.lex.lex(object=self, **kwargs)
def reset_lineno(self):
""" Resets the internal line number counter of the lexer.
"""
self.lexer.lineno = 1
def input(self, text):
self.lexer.input(text)
def token(self):
g = self.lexer.token()
return g
######################-- PRIVATE --######################
##
## Internal auxiliary methods
##
def _error(self, msg, token):
location = self._make_tok_location(token)
self.error_func(msg, location[0], location[1])
self.lexer.skip(1)
def _find_tok_column(self, token):
i = token.lexpos
while i > 0:
if self.lexer.lexdata[i] == '\n': break
i -= 1
return (token.lexpos - i) + 1
def _make_tok_location(self, token):
return (token.lineno, self._find_tok_column(token))
##
## Reserved keywords
##
keywords = (
'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE',
'DEFAULT', 'DO', 'DOUBLE', 'ELSE', 'ENUM', 'EXTERN',
'FLOAT', 'FOR', 'GOTO', 'IF', 'INT', 'LONG', 'REGISTER',
'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT',
'SWITCH', 'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID',
'VOLATILE', 'WHILE',
)
keyword_map = {}
for r in keywords:
keyword_map[r.lower()] = r
##
## All the tokens recognized by the lexer
##
tokens = keywords + (
# Identifiers
'ID',
# Type identifiers (identifiers previously defined as
# types with typedef)
'TYPEID',
# constants
'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX',
'FLOAT_CONST',
'CHAR_CONST',
'WCHAR_CONST',
# String literals
'STRING_LITERAL',
'WSTRING_LITERAL',
# Operators
'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD',
'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT',
'LOR', 'LAND', 'LNOT',
'LT', 'LE', 'GT', 'GE', 'EQ', 'NE',
# Assignment
'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL',
'PLUSEQUAL', 'MINUSEQUAL',
'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL',
'OREQUAL',
# Increment/decrement
'PLUSPLUS', 'MINUSMINUS',
# Structure dereference (->)
'ARROW',
# Conditional operator (?)
'CONDOP',
        # Delimiters
'LPAREN', 'RPAREN', # ( )
'LBRACKET', 'RBRACKET', # [ ]
'LBRACE', 'RBRACE', # { }
        'COMMA', 'PERIOD', # , .
'SEMI', 'COLON', # ; :
# Ellipsis (...)
'ELLIPSIS',
# pre-processor
'PPHASH', # '#'
)
##
## Regexes for use in tokens
##
##
# valid C identifiers (K&R2: A.2.3)
identifier = r'[a-zA-Z_][0-9a-zA-Z_]*'
# integer constants (K&R2: A.2.5.1)
integer_suffix_opt = r'(([uU][lL])|([lL][uU])|[uU]|[lL])?'
decimal_constant = '(0'+integer_suffix_opt+')|([1-9][0-9]*'+integer_suffix_opt+')'
octal_constant = '0[0-7]*'+integer_suffix_opt
hex_constant = '0[xX][0-9a-fA-F]+'+integer_suffix_opt
bad_octal_constant = '0[0-7]*[89]'
# character constants (K&R2: A.2.5.2)
# Note: a-zA-Z are allowed as escape chars to support #line
# directives with Windows paths as filenames (\dir\file...)
#
simple_escape = r"""([a-zA-Z\\?'"])"""
octal_escape = r"""([0-7]{1,3})"""
hex_escape = r"""(x[0-9a-fA-F]+)"""
bad_escape = r"""([\\][^a-zA-Z\\?'"x0-7])"""
escape_sequence = r"""(\\("""+simple_escape+'|'+octal_escape+'|'+hex_escape+'))'
cconst_char = r"""([^'\\\n]|"""+escape_sequence+')'
char_const = "'"+cconst_char+"'"
wchar_const = 'L'+char_const
unmatched_quote = "('"+cconst_char+"*\\n)|('"+cconst_char+"*$)"
bad_char_const = r"""('"""+cconst_char+"""[^'\n]+')|('')|('"""+bad_escape+r"""[^'\n]*')"""
# string literals (K&R2: A.2.6)
string_char = r"""([^"\\\n]|"""+escape_sequence+')'
string_literal = '"'+string_char+'*"'
wstring_literal = 'L'+string_literal
bad_string_literal = '"'+string_char+'*'+bad_escape+string_char+'*"'
# floating constants (K&R2: A.2.5.3)
exponent_part = r"""([eE][-+]?[0-9]+)"""
fractional_constant = r"""([0-9]*\.[0-9]+)|([0-9]+\.)"""
floating_constant = '(((('+fractional_constant+')'+exponent_part+'?)|([0-9]+'+exponent_part+'))[FfLl]?)'
##
## Lexer states
##
states = (
# ppline: preprocessor line directives
#
('ppline', 'exclusive'),
)
def t_PPHASH(self, t):
r'[ \t]*\#'
m = self.line_pattern.match(
t.lexer.lexdata, pos=t.lexer.lexpos)
if m:
t.lexer.begin('ppline')
self.pp_line = self.pp_filename = None
#~ print "ppline starts on line %s" % t.lexer.lineno
else:
t.type = 'PPHASH'
return t
##
## Rules for the ppline state
##
@TOKEN(string_literal)
def t_ppline_FILENAME(self, t):
if self.pp_line is None:
self._error('filename before line number in #line', t)
else:
self.pp_filename = t.value.lstrip('"').rstrip('"')
#~ print "PP got filename: ", self.pp_filename
@TOKEN(decimal_constant)
def t_ppline_LINE_NUMBER(self, t):
if self.pp_line is None:
self.pp_line = t.value
else:
# Ignore: GCC's cpp sometimes inserts a numeric flag
# after the file name
pass
def t_ppline_NEWLINE(self, t):
r'\n'
if self.pp_line is None:
self._error('line number missing in #line', t)
else:
self.lexer.lineno = int(self.pp_line)
if self.pp_filename is not None:
self.filename = self.pp_filename
t.lexer.begin('INITIAL')
def t_ppline_PPLINE(self, t):
r'line'
pass
t_ppline_ignore = ' \t'
def t_ppline_error(self, t):
msg = 'invalid #line directive'
self._error(msg, t)
##
## Rules for the normal state
##
t_ignore = ' \t'
# Newlines
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += t.value.count("\n")
# Operators
t_PLUS = r'\+'
t_MINUS = r'-'
t_TIMES = r'\*'
t_DIVIDE = r'/'
t_MOD = r'%'
t_OR = r'\|'
t_AND = r'&'
t_NOT = r'~'
t_XOR = r'\^'
t_LSHIFT = r'<<'
t_RSHIFT = r'>>'
t_LOR = r'\|\|'
t_LAND = r'&&'
t_LNOT = r'!'
t_LT = r'<'
t_GT = r'>'
t_LE = r'<='
t_GE = r'>='
t_EQ = r'=='
t_NE = r'!='
# Assignment operators
t_EQUALS = r'='
t_TIMESEQUAL = r'\*='
t_DIVEQUAL = r'/='
t_MODEQUAL = r'%='
t_PLUSEQUAL = r'\+='
t_MINUSEQUAL = r'-='
t_LSHIFTEQUAL = r'<<='
t_RSHIFTEQUAL = r'>>='
t_ANDEQUAL = r'&='
t_OREQUAL = r'\|='
t_XOREQUAL = r'^='
# Increment/decrement
t_PLUSPLUS = r'\+\+'
t_MINUSMINUS = r'--'
# ->
t_ARROW = r'->'
# ?
t_CONDOP = r'\?'
    # Delimiters
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_COMMA = r','
t_PERIOD = r'\.'
t_SEMI = r';'
t_COLON = r':'
t_ELLIPSIS = r'\.\.\.'
t_STRING_LITERAL = string_literal
# The following floating and integer constants are defined as
# functions to impose a strict order (otherwise, decimal
# is placed before the others because its regex is longer,
# and this is bad)
#
@TOKEN(floating_constant)
def t_FLOAT_CONST(self, t):
return t
@TOKEN(hex_constant)
def t_INT_CONST_HEX(self, t):
return t
@TOKEN(bad_octal_constant)
def t_BAD_CONST_OCT(self, t):
msg = "Invalid octal constant"
self._error(msg, t)
@TOKEN(octal_constant)
def t_INT_CONST_OCT(self, t):
return t
@TOKEN(decimal_constant)
def t_INT_CONST_DEC(self, t):
return t
# Must come before bad_char_const, to prevent it from
# catching valid char constants as invalid
#
@TOKEN(char_const)
def t_CHAR_CONST(self, t):
return t
@TOKEN(wchar_const)
def t_WCHAR_CONST(self, t):
return t
@TOKEN(unmatched_quote)
def t_UNMATCHED_QUOTE(self, t):
msg = "Unmatched '"
self._error(msg, t)
@TOKEN(bad_char_const)
def t_BAD_CHAR_CONST(self, t):
msg = "Invalid char constant %s" % t.value
self._error(msg, t)
@TOKEN(wstring_literal)
def t_WSTRING_LITERAL(self, t):
return t
# unmatched string literals are caught by the preprocessor
@TOKEN(bad_string_literal)
def t_BAD_STRING_LITERAL(self, t):
msg = "String contains invalid escape code"
self._error(msg, t)
@TOKEN(identifier)
def t_ID(self, t):
t.type = self.keyword_map.get(t.value, "ID")
if t.type == 'ID' and self.type_lookup_func(t.value):
t.type = "TYPEID"
return t
def t_error(self, t):
msg = 'Illegal character %s' % repr(t.value[0])
self._error(msg, t)
if __name__ == "__main__":
filename = '../zp.c'
text = open(filename).read()
#~ text = '"'+r"""ka \p ka"""+'"'
text = r"""
546
#line 66 "kwas\df.h"
id 4
# 5
dsf
"""
def errfoo(msg, a, b):
print msg
sys.exit()
def typelookup(namd):
return False
clex = CLexer(errfoo, typelookup)
clex.build()
clex.input(text)
while 1:
tok = clex.token()
if not tok: break
#~ print type(tok)
print "-", tok.value, tok.type, tok.lineno, clex.filename, tok.lexpos

File diff suppressed because it is too large

View file

@ -0,0 +1,4 @@
# PLY package
# Author: David Beazley (dave@dabeaz.com)
__all__ = ['lex','yacc']

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -0,0 +1,67 @@
#-----------------------------------------------------------------
# plyparser.py
#
# PLYParser class and other utilites for simplifying programming
# parsers with PLY
#
# Copyright (C) 2008-2009, Eli Bendersky
# License: LGPL
#-----------------------------------------------------------------
class Coord(object):
""" Coordinates of a syntactic element. Consists of:
- File name
- Line number
- (optional) column number, for the Lexer
"""
def __init__(self, file, line, column=None):
self.file = file
self.line = line
self.column = column
def __str__(self):
str = "%s:%s" % (self.file, self.line)
if self.column: str += ":%s" % self.column
return str
class ParseError(Exception): pass
class PLYParser(object):
def _create_opt_rule(self, rulename):
""" Given a rule name, creates an optional ply.yacc rule
for it. The name of the optional rule is
<rulename>_opt
"""
optname = rulename + '_opt'
def optrule(self, p):
p[0] = p[1]
optrule.__doc__ = '%s : empty\n| %s' % (optname, rulename)
optrule.__name__ = 'p_%s' % optname
setattr(self.__class__, optrule.__name__, optrule)
def _coord(self, lineno, column=None):
return Coord(
file=self.clex.filename,
line=lineno,
column=column)
def _parse_error(self, msg, coord):
raise ParseError("%s: %s" % (coord, msg))
if __name__ == '__main__':
pp = PLYParser()
pp._create_opt_rule('java')
ar = [4, 6]
pp.p_java_opt(ar)
print ar
print pp.p_java_opt.__doc__
print dir(pp)

File diff suppressed because one or more lines are too long

24
plugins/pyexec.py Normal file
View file

@ -0,0 +1,24 @@
import re
from util import hook, http
re_lineends = re.compile(r'[\r\n]*')
@hook.command
def python(inp):
".python <prog> -- executes python code <prog>"
inp = inp.replace("~n", "\n")
res = http.get("http://eval.appspot.com/eval", statement=inp).splitlines()
if len(res) == 0:
return
res[0] = re_lineends.split(res[0])[0]
if not res[0] == 'Traceback (most recent call last):':
return res[0].decode('utf8', 'ignore')
else:
return res[-1].decode('utf8', 'ignore')
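# Example invocations ("~n" is turned into a real newline before the code is
# sent to the eval service; only one line of its output is relayed):
#   .python print 1 + 2
#   .python x = 5~nprint x * 2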

98
plugins/quote.py Normal file
View file

@ -0,0 +1,98 @@
import random
import re
import time
from util import hook
def add_quote(db, chan, nick, add_nick, msg):
db.execute('''insert or fail into quote (chan, nick, add_nick,
msg, time) values(?,?,?,?,?)''',
(chan, nick, add_nick, msg, time.time()))
db.commit()
def del_quote(db, chan, nick, add_nick, msg):
    db.execute('''update quote set deleted = 1 where
        chan=? and lower(nick)=lower(?) and msg=?''',
               (chan, nick, msg))
db.commit()
def get_quotes_by_nick(db, chan, nick):
return db.execute("select time, nick, msg from quote where deleted!=1 "
"and chan=? and lower(nick)=lower(?) order by time",
(chan, nick)).fetchall()
def get_quotes_by_chan(db, chan):
return db.execute("select time, nick, msg from quote where deleted!=1 "
"and chan=? order by time", (chan,)).fetchall()
def format_quote(q, num, n_quotes):
ctime, nick, msg = q
return "[%d/%d] %s <%s> %s" % (num, n_quotes,
time.strftime("%Y-%m-%d", time.gmtime(ctime)), nick, msg)
@hook.command('q')
@hook.command
def quote(inp, nick='', chan='', db=None):
".q/.quote [#chan] [nick] [#n]/.quote add <nick> <msg> -- gets " \
"random or [#n]th quote by <nick> or from <#chan>/adds quote"
db.execute("create table if not exists quote"
"(chan, nick, add_nick, msg, time real, deleted default 0, "
"primary key (chan, nick, msg))")
db.commit()
add = re.match(r"add[^\w@]+(\S+?)>?\s+(.*)", inp, re.I)
retrieve = re.match(r"(\S+)(?:\s+#?(-?\d+))?$", inp)
retrieve_chan = re.match(r"(#\S+)\s+(\S+)(?:\s+#?(-?\d+))?$", inp)
if add:
quoted_nick, msg = add.groups()
try:
add_quote(db, chan, quoted_nick, nick, msg)
db.commit()
except db.IntegrityError:
return "message already stored, doing nothing."
return "quote added."
elif retrieve:
select, num = retrieve.groups()
by_chan = False
if select.startswith('#'):
by_chan = True
quotes = get_quotes_by_chan(db, select)
else:
quotes = get_quotes_by_nick(db, chan, select)
elif retrieve_chan:
chan, nick, num = retrieve_chan.groups()
quotes = get_quotes_by_nick(db, chan, nick)
else:
return quote.__doc__
n_quotes = len(quotes)
if not n_quotes:
return "no quotes found"
if num:
num = int(num)
if num:
if num > n_quotes or (num < 0 and num < -n_quotes):
return "I only have %d quote%s for %s" % (n_quotes,
('s', '')[n_quotes == 1], select)
elif num < 0:
selected_quote = quotes[num]
num = n_quotes + num + 1
else:
selected_quote = quotes[num - 1]
else:
num = random.randint(1, n_quotes)
selected_quote = quotes[num - 1]
return format_quote(selected_quote, num, n_quotes)
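# Example invocations (nicks and channels are placeholders):
#   .quote add somenick this is the quoted line   -- store a quote
#   .quote somenick                               -- random quote by somenick
#   .quote somenick #2                            -- second quote by somenick
#   .quote #somechan                              -- random quote from #somechan
#   .quote #somechan somenick #2                  -- second quote by somenick in #somechan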

26
plugins/reactions.py Normal file
View file

@ -0,0 +1,26 @@
from util import hook
import re
@hook.regex(r'^(H|h)ello mau5bot')
def response_hello(inp, say=None, nick=None):
say("Hello " + nick + "!")
@hook.regex(r'^(H|h)i mau5bot')
def response_hi(inp, say=None, nick=None):
say("Hi " + nick + "!")
@hook.regex(r'^(H|h)eya mau5bot')
def response_heya(inp, say=None, nick=None):
say("Heya " + nick + "!")
@hook.regex(r'^(S|s)up mau5bot')
def response_sup(inp, say=None, nick=None):
say("Sup " + nick + "!")
@hook.regex(r'^((I|i) love( you,?)?|ilu) mau5bot')
def response_love(inp, say=None, nick=None):
say("I love you too, " + nick)
@hook.regex(r'^((I|i) hate( you,?)?|ihu) mau5bot')
def response_hate(inp, say=None):
say(";(")

181
plugins/repaste.py Normal file
View file

@ -0,0 +1,181 @@
from util import hook, http
import urllib
import random
import urllib2
import htmlentitydefs
import re
re_htmlent = re.compile("&(" + "|".join(htmlentitydefs.name2codepoint.keys()) + ");")
re_numeric = re.compile(r'&#(x?)([a-fA-F0-9]+);')
def db_init(db):
db.execute("create table if not exists repaste(chan, manual, primary key(chan))")
db.commit()
def decode_html(text):
text = re.sub(re_htmlent,
lambda m: unichr(htmlentitydefs.name2codepoint[m.group(1)]),
text)
text = re.sub(re_numeric,
lambda m: unichr(int(m.group(2), 16 if m.group(1) else 10)),
text)
return text
def scrape_mibpaste(url):
if not url.startswith("http"):
url = "http://" + url
pagesource = http.get(url)
rawpaste = re.search(r'(?s)(?<=<body>\n).+(?=<hr>)', pagesource).group(0)
filterbr = rawpaste.replace("<br />", "")
unescaped = decode_html(filterbr)
stripped = unescaped.strip()
return stripped
def scrape_pastebin(url):
id = re.search(r'(?:www\.)?pastebin.com/([a-zA-Z0-9]+)$', url).group(1)
rawurl = "http://pastebin.com/raw.php?i=" + id
text = http.get(rawurl)
return text
autorepastes = {}
#@hook.regex('(pastebin\.com)(/[^ ]+)')
@hook.regex('(mibpaste\.com)(/[^ ]+)')
def autorepaste(inp, input=None, db=None, chan=None):
db_init(db)
manual = input.db.execute("select manual from repaste where chan=?", (chan, )).fetchone()
if manual and len(manual) and manual[0]:
return
url = inp.group(1) + inp.group(2)
    url = urllib.unquote(url)
if url in autorepastes:
out = autorepastes[url]
input.notice("In the future, please use a less awful pastebin (e.g. pastebin.com)")
else:
out = repaste("http://" + url, input, db, False)
autorepastes[url] = out
input.notice("In the future, please use a less awful pastebin (e.g. pastebin.com) instead of %s." % inp.group(1))
input.say("%s (repasted for %s)" % (out, input.nick))
scrapers = {
r'mibpaste\.com': scrape_mibpaste,
r'pastebin\.com': scrape_pastebin
}
def scrape(url):
for pat, scraper in scrapers.iteritems():
print "matching " + repr(pat) + " " + url
if re.search(pat, url):
break
else:
return None
return scraper(url)
def paste_sprunge(text, syntax=None, user=None):
data = urllib.urlencode({"sprunge": text})
url = urllib2.urlopen("http://sprunge.us/", data).read().strip()
if syntax:
url += "?" + syntax
return url
def paste_ubuntu(text, user=None, syntax='text'):
data = urllib.urlencode({"poster": user,
"syntax": syntax,
"content": text})
return urllib2.urlopen("http://paste.ubuntu.com/", data).url
def paste_gist(text, user=None, syntax=None, description=None):
data = {
'file_contents[gistfile1]': text,
'action_button': "private"
}
if description:
data['description'] = description
if syntax:
data['file_ext[gistfile1]'] = "." + syntax
req = urllib2.urlopen('https://gist.github.com/gists', urllib.urlencode(data).encode('utf8'))
return req.url
def paste_strictfp(text, user=None, syntax="plain"):
data = urllib.urlencode(dict(
language=syntax,
paste=text,
private="private",
submit="Paste"))
req = urllib2.urlopen("http://paste.strictfp.com/", data)
return req.url
pasters = dict(
ubuntu=paste_ubuntu,
sprunge=paste_sprunge,
gist=paste_gist,
strictfp=paste_strictfp
)
@hook.command
def repaste(inp, input=None, db=None, isManual=True):
".repaste mode|list|[provider] [syntax] <pastebinurl> -- scrape mibpaste, reupload on given pastebin"
parts = inp.split()
db_init(db)
if parts[0] == 'list':
return " ".join(pasters.keys())
paster = paste_gist
args = {}
if not parts[0].startswith("http"):
p = parts[0].lower()
if p in pasters:
paster = pasters[p]
parts = parts[1:]
if not parts[0].startswith("http"):
p = parts[0].lower()
parts = parts[1:]
args["syntax"] = p
if len(parts) > 1:
return "PEBKAC"
args["user"] = input.user
url = parts[0]
scraped = scrape(url)
if not scraped:
return "No scraper for given url"
args["text"] = scraped
pasted = paster(**args)
return pasted
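# Example invocations (the URL is a placeholder for a real mibpaste link):
#   .repaste list
#   .repaste http://mibpaste.com/1234
#   .repaste sprunge python http://mibpaste.com/1234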

37
plugins/rottentomatoes.py Normal file
View file

@ -0,0 +1,37 @@
from util import http, hook
api_root = 'http://api.rottentomatoes.com/api/public/v1.0/'
movie_search_url = api_root+'movies.json?q=%s&apikey=%s'
movie_info_url = api_root+'movies/%s.json?apikey=%s'
movie_reviews_url = api_root+'movies/%s/reviews.json?apikey=%s&review_type=all'
response = u"%s - critics: \x02%d%%\x02 (%d\u2191%d\u2193) audience: \x02%d%%\x02 - %s"
@hook.command('rt')
def rottentomatoes(inp, bot=None):
'.rt <title> -- gets ratings for <title> from Rotten Tomatoes'
api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None)
if not api_key:
return None
title = inp.strip()
results = http.get_json(movie_search_url % (http.quote_plus(title), api_key))
if results['total'] > 0:
movie = results['movies'][0]
title = movie['title']
id = movie['id']
critics_score = movie['ratings']['critics_score']
audience_score = movie['ratings']['audience_score']
url = movie['links']['alternate']
if critics_score != -1:
reviews = http.get_json(movie_reviews_url%(id, api_key))
review_count = reviews['total']
fresh = critics_score * review_count / 100
rotten = review_count - fresh
return response % (title, critics_score, fresh, rotten, audience_score, url)

52
plugins/seen.py Normal file
View file

@ -0,0 +1,52 @@
" seen.py: written by sklnd in about two beers July 2009"
import time
import re
from util import hook, timesince
def db_init(db):
"check to see that our db has the the seen table and return a connection."
db.execute("create table if not exists seen(name, time, quote, chan, "
"primary key(name, chan))")
db.commit()
@hook.singlethread
@hook.event('PRIVMSG', ignorebots=False)
def seeninput(paraml, input=None, db=None, bot=None):
db_init(db)
db.execute("insert or replace into seen(name, time, quote, chan)"
"values(?,?,?,?)", (input.nick.lower(), time.time(), input.msg,
input.chan))
db.commit()
@hook.command
def seen(inp, nick='', chan='', db=None, input=None):
".seen <nick> -- Tell when a nickname was last in active in irc"
if input.conn.nick.lower() == inp.lower():
# user is looking for us, being a smartass
return "You need to get your eyes checked."
if inp.lower() == nick.lower():
return "Have you looked in a mirror lately?"
if not re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()):
return "I cant look up that name, its impossible to use!"
db_init(db)
last_seen = db.execute("select name, time, quote from seen where name"
" like ? and chan = ?", (inp, chan)).fetchone()
if last_seen:
reltime = timesince.timesince(last_seen[1])
if last_seen[0] != inp.lower(): # for glob matching
inp = last_seen[0]
return '%s was last seen %s ago saying: %s' % \
(inp, reltime, last_seen[2])
else:
return "I've never seen %s" % inp

42
plugins/shorten.py Normal file
View file

@ -0,0 +1,42 @@
# # Lukeroge
from util import hook
try:
from re import match
from urllib2 import urlopen, Request, HTTPError
from urllib import urlencode
from simplejson import loads
except ImportError, e:
raise Exception('Required module missing: %s' % e.args[0])
user = "o_750ro241n9"
apikey = "R_f3d0a9b478c53d247a134d0791f898fe"
def expand(url):
try:
params = urlencode({'shortUrl': url, 'login': user, 'apiKey': apikey, 'format': 'json'})
req = Request("http://api.bit.ly/v3/expand?%s" % params)
response = urlopen(req)
j = loads(response.read())
if j['status_code'] == 200:
return j['data']['expand'][0]['long_url']
raise Exception('%s'%j['status_txt'])
except HTTPError, e:
        raise Exception('HTTP Error: %s' % e.read())
def tiny(url):
try:
params = urlencode({'longUrl': url, 'login': user, 'apiKey': apikey, 'format': 'json'})
req = Request("http://api.bit.ly/v3/shorten?%s" % params)
response = urlopen(req)
j = loads(response.read())
if j['status_code'] == 200:
return j['data']['url']
raise Exception('%s'%j['status_txt'])
except HTTPError, e:
        raise Exception('HTTP error: %s' % e.read())
@hook.command
def shorten(inp):
".shorten <url> - Makes an j.mp shortlink to the url provided"
return tiny(inp)

42
plugins/sieve.py Normal file
View file

@ -0,0 +1,42 @@
import re
from util import hook
@hook.sieve
def sieve_suite(bot, input, func, kind, args):
if input.command == 'PRIVMSG' and \
input.nick.lower()[-3:] == 'bot' and args.get('ignorebots', True):
return None
if kind == "command":
if input.trigger in bot.config.get('disabled_commands', []):
return None
ignored = bot.config.get('ignored', [])
if input.host in ignored or input.nick in ignored:
return None
fn = re.match(r'^plugins.(.+).py$', func._filename)
disabled = bot.config.get('disabled_plugins', [])
if fn and fn.group(1).lower() in disabled:
return None
acl = bot.config.get('acls', {}).get(func.__name__)
if acl:
if 'deny-except' in acl:
allowed_channels = map(unicode.lower, acl['deny-except'])
if input.chan.lower() not in allowed_channels:
return None
if 'allow-except' in acl:
denied_channels = map(unicode.lower, acl['allow-except'])
if input.chan.lower() in denied_channels:
return None
if args.get('adminonly', False):
admins = bot.config.get('admins', [])
if input.host not in admins and input.nick not in admins:
return None
return input
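# A sketch of the config keys this sieve reads (channel and nick values here
# are placeholders):
#
#   "admins": ["someadmin"],
#   "ignored": ["annoyingnick"],
#   "disabled_commands": ["raw"],
#   "disabled_plugins": ["pyexec"],
#   "acls": {
#       "stock": {"deny-except": ["#bots"]},
#       "python": {"allow-except": ["#serious"]}
#   }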

186
plugins/slap.py Normal file
View file

@ -0,0 +1,186 @@
from util import hook
import re
import random
larts = ["swaps <who>'s shampoo with glue",
"installs windows on <who>'s machine",
"forces <who> to use perl for 3 weeks",
"registers <who>'s name with 50 known spammers",
"resizes <who>'s to 40x24",
"takes <who>'s drink",
"dispenses <who>'s email address to a few hundred 'bulk mailing services'",
"pokes <who> in the eye",
"beats <who> senseless with a 50lb Linux manual",
"cats /dev/random into <who>'s ear",
"signs <who> up for AOL",
"enrolls <who> in Visual Basic 101",
"sporks <who>",
"drops a truckload of support tickets on <who>",
"judo chops <who>",
"sets <who>'s resolution to 800x600",
"formats <who>'s harddrive to fat12",
"rm -rf's <who>",
"stabs <who>",
"steals <who>'s mojo",
"strangles <who> with a doohicky mouse cord",
"whacks <who> with the cluebat",
"sells <who> on EBay",
"uses <who> as a biological warfare study",
"uses the 'Customer Appreciation Bat' on <who>",
"puts <who> in the Total Perspective Vortex",
"casts <who> into the fires of Mt. Doom",
"gives <who> a melvin",
"turns over <who> to Agent Smith to be 'bugged'",
"takes away <who>'s internet connection",
"pushes <who> past the Shoe Event Horizon",
"counts '1, 2, 5... er... 3!' and hurls the Holy Handgrenade Of Antioch at <who>",
"puts <who> in a nest of camel spiders",
"makes <who> read slashdot at -1",
"puts 'alias vim=emacs' in <who>'s /etc/profile",
"uninstalls every web browser from <who>'s system",
"locks <who> in the Chateau d'If",
"signs <who> up for getting hit on the head lessons",
"makes <who> try to set up a Lexmark printer",
"fills <who>'s eyedrop bottle with lime juice",
"casts <who> into the fires of Mt. Doom.",
"gives <who> a Flying Dutchman",
"rips off <who>'s arm, and uses it to beat them to death",
"pierces <who>'s nose with a rusty paper hole puncher",
"pokes <who> with a rusty nail",
"puts sugar between <who>'s bedsheets",
"pours sand into <who>'s breakfast",
"mixes epoxy into <who>'s toothpaste",
"puts Icy-Hot in <who>'s lube container",
"straps <who> to a chair, and plays a endless low bitrate MP3 loop of \"the world's most annoying sound\" from \"Dumb and Dumber\"",
"tells Dr. Dre that <who> was talking smack",
"forces <who> to use a Commodore 64 for all their word processing",
"smacks <who> in the face with a burlap sack full of broken glass",
"puts <who> in a room with several heavily armed manic depressives",
"makes <who> watch reruns of \"Blue's Clues\"",
"puts lye in <who>'s coffee",
"tattoos the Windows symbol on <who>'s ass",
"lets Borg have his way with <who>",
"signs <who> up for line dancing classes at the local senior center",
"wakes <who> out of a sound sleep with some brand new nipple piercings",
"gives <who> a 2 guage Prince Albert",
"forces <who> to eat all their veggies",
"covers <who>'s toilet paper with lemon-pepper",
"fills <who>'s ketchup bottle with Dave's Insanity sauce",
"forces <who> to stare at an incredibly frustrating and seemingly neverending IRC political debate",
"knocks two of <who>'s teeth out with a 2x4",
"removes debian from <who>'s system",
"uses <who>'s iPod for skeet shooting practice",
"gives <who>'s phone number to Borg",
"posts <who>'s IP, username, and password on 4chan",
"forces <who> to use words like 'irregardless' and 'administrate' (thereby sounding like a real dumbass)",
"tickles <who> until they wet their pants and pass out",
"replaces <who>'s KY with elmer's clear wood glue",
"replaces <who>'s TUMS with alka-seltzer tablets",
"squeezes habanero pepper juice into <who>'s tub of vaseline",
"Forces <who> to learn the Win32 API",
"gives <who> an atomic wedgie",
"ties <who> to a chair and forces them to listen to 'N Sync at full blast",
"forces <who> to use notepad for text editing",
"frowns at <who> really really hard",
"jabs a hot lighter into <who>'s eye sockets",
"forces <who> to browse the web with IE6",
"takes <who> out at the knees with a broken pool cue",
"forces <who> to listen to emo music",
"lets a few creepers into <who>'s house",
"signs <who> up for the Iowa State Ferret Legging Championship",
"attempts to hotswap <who>'s RAM",
"dragon punches <who>",
"puts track spikes into <who>'s side",
"replaces <who>'s Astroglide with JB Weld",
"replaces <who>'s stress pills with rat poison pellets",
"replaces <who>s crotch itch cream with Nair",
"does the Australian Death Grip on <who>",
"dances upon the grave of <who>'s ancestors.",
"farts in <who>'s general direction",
"flogs <who> with stinging neddle",
"assigns all of the permissions tickets on the BeastNode support system to <who>",
"hands <who> a poison ivy joint"]
loves = ["hugs <who>",
"gives <who> some love",
"gives <who> a cookie",
"makes a balloon animal for <who>",
"shares a slice of cake with <who>",
"slaps <who> heartily on the back",
"tickles <who>"]
slaps = ["slaps <who> with a <item>",
"slaps <who> around a bit with a <item>",
"throws a <item> at <who>",
"grabs a <item> and throws it in <who>'s face",
"holds <who> down and repeatedly whacks them with a <item>",
"prods <who> with a flaming <item>",
"picks up a <item>, and whacks <who> with it",
"ties <who> to a chair and throws a <item> at them",
"hits <who> on the head with a <item>"]
items = ["cast iron skillet",
"large trout",
"baseball bat",
"wooden cane",
"CRT monitor",
"physics textbook",
"television",
"five tonne truck",
"roll of duct tape",
"book",
"rubber chicken",
"fire extinguisher",
"heavy rock",
"chunk of dirt"]
@hook.command
def lart(inp, me = None, nick = None, input=None, notice=None):
".lart <user> - LARTs a user of your choice"
inp = inp.strip()
if not re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()):
notice("Invalid username!")
return
    if inp.lower() == input.conn.nick.lower() or inp == "itself":
msg = 'slaps ' + nick + ' in the face!'
else:
msg = re.sub ('<who>', inp, random.choice(larts))
me(msg)
@hook.command
def love(inp, me = None, input=None, notice=None):
".love <user> - gives a user a nice comment"
inp = inp.strip()
if not re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()):
notice("Invalid username!")
return
    if inp.lower() == input.conn.nick.lower() or inp == "itself":
msg = 'hugs themself!'
else:
msg = re.sub ('<who>', inp, random.choice(loves))
me(msg)
@hook.command
def slap(inp, me = None, nick = None, input=None, notice=None):
".slap <user> - slap a user"
inp = inp.strip()
if not re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()):
notice("Invalid username!")
return
    if inp.lower() == input.conn.nick.lower() or inp == "itself":
msg = 'slaps ' + nick + ' in the face!'
else:
slap = random.choice(slaps)
slap = re.sub ('<who>', inp, slap)
msg = re.sub ('<item>', random.choice(items), slap)
me(msg)

34
plugins/snopes.py Normal file
View file

@ -0,0 +1,34 @@
import re
from util import hook, http
search_url = "http://search.atomz.com/search/?sp_a=00062d45-sp00000000"
@hook.command
def snopes(inp):
".snopes <topic> -- searches snopes for an urban legend about <topic>"
search_page = http.get_html(search_url, sp_q=inp, sp_c="1")
result_urls = search_page.xpath("//a[@target='_self']/@href")
if not result_urls:
return "no matching pages found"
snopes_page = http.get_html(result_urls[0])
snopes_text = snopes_page.text_content()
claim = re.search(r"Claim: .*", snopes_text).group(0).strip()
status = re.search(r"Status: .*", snopes_text)
if status is not None:
status = status.group(0).strip()
else: # new-style statuses
status = "Status: %s." % re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED",
snopes_text).group(0).title()
claim = re.sub(r"[\s\xa0]+", " ", claim) # compress whitespace
status = re.sub(r"[\s\xa0]+", " ", status)
return "%s %s %s" % (claim, status, result_urls[0])

37
plugins/stock.py Normal file
View file

@ -0,0 +1,37 @@
import random
from util import hook, http
@hook.command
def stock(inp):
'''.stock <symbol> -- gets information about a stock symbol'''
url = 'http://www.google.com/ig/api?stock=%s'
parsed = http.get_xml(url, stock=inp)
if len(parsed) != 1:
return "error getting stock info"
# Stuff the results in a dict for easy string formatting
results = dict((el.tag, el.attrib['data'])
for el in parsed.xpath('//finance/*'))
# if we dont get a company name back, the symbol doesn't match a company
if results['company'] == '':
return "unknown ticker symbol %s" % inp
if results['change'][0] == '-':
results['color'] = "5"
else:
results['color'] = "3"
ret = "%(company)s - %(last)s %(currency)s " \
"\x03%(color)s%(change)s (%(perc_change)s)\x03 " \
"as of %(trade_timestamp)s" % results
if results['delay'] != '0':
ret += " (delayed %s minutes)" % results['delay']
return ret

33
plugins/suggest.py Normal file
View file

@ -0,0 +1,33 @@
import json
import random
import re
from util import hook, http
@hook.command
def suggest(inp, inp_unstripped=''):
".suggest [#n] <phrase> -- gets a random/the nth suggested google search"
inp = inp_unstripped
m = re.match('^#(\d+) (.+)$', inp)
if m:
num, inp = m.groups()
num = int(num)
if num > 10:
return 'can only get first ten suggestions'
else:
num = 0
page = http.get('http://google.com/complete/search', q=inp)
page_json = page.split('(', 1)[1][:-1]
suggestions = json.loads(page_json)[1]
if not suggestions:
return 'no suggestions found'
if num:
if len(suggestions) + 1 <= num:
return 'only got %d suggestions' % len(suggestions)
out = suggestions[num - 1]
else:
out = random.choice(suggestions)
return '#%d: %s' % (int(out[2][0]) + 1, out[0])
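# Example invocations:
#   .suggest how do i           -- random suggestion
#   .suggest #3 how do i        -- third suggestion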

118
plugins/tell.py Normal file
View file

@ -0,0 +1,118 @@
" tell.py: written by sklnd in July 2009"
" 2010.01.25 - modified by Scaevolus"
import time
import re
from util import hook, timesince
def db_init(db):
"check to see that our db has the tell table and return a dbection."
db.execute("create table if not exists tell"
"(user_to, user_from, message, chan, time,"
"primary key(user_to, message))")
db.commit()
return db
def get_tells(db, user_to):
return db.execute("select user_from, message, time, chan from tell where"
" user_to=lower(?) order by time",
(user_to.lower(),)).fetchall()
@hook.singlethread
@hook.event('PRIVMSG')
def tellinput(paraml, input=None, db=None, bot=None):
if 'showtells' in input.msg.lower():
return
db_init(db)
tells = get_tells(db, input.nick)
if tells:
user_from, message, time, chan = tells[0]
reltime = timesince.timesince(time)
reply = "%s said %s ago in %s: %s" % (user_from, reltime, chan,
message)
if len(tells) > 1:
reply += " (+%d more, .showtells to view)" % (len(tells) - 1)
db.execute("delete from tell where user_to=lower(?) and message=?",
(input.nick, message))
db.commit()
input.notice(reply)
@hook.command(autohelp=False)
def showtells(inp, nick='', chan='', notice=None, db=None):
".showtells -- view all pending tell messages (sent in PM)."
db_init(db)
tells = get_tells(db, nick)
if not tells:
notice("You have no pending tells.")
return
for tell in tells:
user_from, message, time, chan = tell
past = timesince.timesince(time)
notice("%s said %s ago in %s: %s" % (user_from, past, chan, message))
db.execute("delete from tell where user_to=lower(?)",
(nick,))
db.commit()
@hook.command
def tell(inp, nick='', chan='', db=None, input=None, notice=None):
".tell <nick> <message> -- relay <message> to <nick> when <nick> is around"
query = inp.split(' ', 1)
if len(query) != 2:
return tell.__doc__
user_to = query[0].lower()
message = query[1].strip()
user_from = nick
if chan.lower() == user_from.lower():
chan = 'a pm'
if user_to == user_from.lower():
notice("No.")
return
if user_to.lower() == "mau5bot":
# user is looking for us, being a smartass
notice("Thanks for the message, " + user_from + "!")
return
if not re.match("^[A-Za-z0-9_|.-\]\[]*$", user_to.lower()):
notice("I cant send a message to that user!")
return
db_init(db)
if db.execute("select count() from tell where user_to=?",
(user_to,)).fetchone()[0] >= 10:
notice("That person has too many things queued.")
return
try:
db.execute("insert into tell(user_to, user_from, message, chan,"
"time) values(?,?,?,?,?)", (user_to, user_from, message,
chan, time.time()))
db.commit()
except db.IntegrityError:
notice("Message has already been queued.")
return
notice("I'll pass that along.")

186
plugins/todo.py Normal file
View file

@ -0,0 +1,186 @@
from util import hook
import re
db_inited = False
def cleanSQL(sql):
return re.sub(r'\s+', " ", sql).strip()
def db_init(db):
global db_inited
if db_inited:
return
exists = db.execute("""
select exists (
select * from sqlite_master where type = "table" and name = "todos"
)
""").fetchone()[0] == 1
if not exists:
db.execute(cleanSQL("""
create virtual table todos using fts4(
user,
text,
added,
tokenize=porter
)"""))
db.commit()
db_inited = True
def db_getall(db, nick, limit=-1):
return db.execute("""
select added, text
from todos
where lower(user) = lower(?)
order by added desc
limit ?
""", (nick, limit))
def db_get(db, nick, id):
return db.execute("""
select added, text from todos
where lower(user) = lower(?)
order by added desc
limit 1
offset ?
""", (nick, id)).fetchone()
def db_del(db, nick, limit='all'):
row = db.execute("""
delete from todos
where rowid in (
select rowid from todos
where lower(user) = lower(?)
order by added desc
limit ?
offset ?)
""", (nick,
-1 if limit == 'all' else 1,
0 if limit == 'all' else limit))
db.commit()
return row
def db_add(db, nick, text):
db.execute("""
insert into todos (user, text, added)
values (?, ?, CURRENT_TIMESTAMP)
""", (nick, text))
db.commit()
def db_search(db, nick, query):
return db.execute("""
select added, text
from todos
where todos match ?
and lower(user) = lower(?)
order by added desc
""", (query, nick))
@hook.command
def todo(inp, nick='', chan='', db=None, notice=None, bot=None):
"todo (add|del|list|search) [@user] args -- manipulates your todos"
db_init(db)
parts = inp.split()
cmd = parts[0].lower()
args = parts[1:]
if len(args) and args[0].startswith("@"):
nick = args[0][1:]
args = args[1:]
if cmd == 'add':
if not len(args):
return "no text"
text = " ".join(args)
db_add(db, nick, text)
notice("Task added!")
return
elif cmd == 'get':
if len(args):
try:
index = int(args[0])
except ValueError:
notice("Invalid number format.")
return
else:
index = 0
row = db_get(db, nick, index)
if not row:
notice("No such entry.")
return
notice("[%d]: %s: %s" % (index, row[0], row[1]))
elif cmd == 'del' or cmd == 'delete' or cmd == 'remove':
if not len(args):
return "error"
if args[0] == 'all':
index = 'all'
else:
try:
index = int(args[0])
except ValueError:
notice("Invalid number.")
return
rows = db_del(db, nick, index)
notice("Deleted %d entries" % rows.rowcount)
elif cmd == 'list':
limit = -1
if len(args):
try:
limit = int(args[0])
limit = max(-1, limit)
except ValueError:
notice("Invalid number.")
return
rows = db_getall(db, nick, limit)
found = False
for (index, row) in enumerate(rows):
notice("[%d]: %s: %s" % (index, row[0], row[1]))
found = True
if not found:
notice("%s has no entries." % nick)
elif cmd == 'search':
if not len(args):
notice("No search query given!")
return
query = " ".join(args)
rows = db_search(db, nick, query)
found = False
for (index, row) in enumerate(rows):
notice("[%d]: %s: %s" % (index, row[0], row[1]))
found = True
if not found:
notice("%s has no matching entries for: %s" % (nick, query))
else:
notice("Unknown command: %s" % cmd)

188
plugins/translate.py Normal file
View file

@ -0,0 +1,188 @@
import htmlentitydefs
import re
from util import hook, http
########### from http://effbot.org/zone/re-sub.htm#unescape-html #############
def unescape(text):
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
##############################################################################
def goog_trans(text, slang, tlang):
url = 'http://ajax.googleapis.com/ajax/services/language/translate?v=1.0&key=ABQIAAAAGjLiqTxkFw7F24ITXc4bNRS04yDz5pgaUTdxja2Sk3UoWlae7xTXom3fBzER6Upo8jfzcTtvz-8ebQ'
parsed = http.get_json(url, q=text, langpair=(slang + '|' + tlang))
if not 200 <= parsed['responseStatus'] < 300:
raise IOError('error with the translation server: %d: %s' % (
parsed['responseStatus'], parsed['responseDetails']))
if not slang:
return unescape('(%(detectedSourceLanguage)s) %(translatedText)s' %
(parsed['responseData']))
return unescape(parsed['responseData']['translatedText'])
def match_language(fragment):
fragment = fragment.lower()
for short, _ in lang_pairs:
if fragment in short.lower().split():
return short.split()[0]
for short, full in lang_pairs:
if fragment in full.lower():
return short.split()[0]
return None
@hook.command
def translate(inp):
'.translate [source language [target language]] <sentence> -- translates' \
' <sentence> from source language (default autodetect) to target' \
' language (default English) using Google Translate'
args = inp.split(' ', 2)
try:
if len(args) >= 2:
sl = match_language(args[0])
if not sl:
return goog_trans(inp, '', 'en')
if len(args) >= 3:
tl = match_language(args[1])
if not tl:
if sl == 'en':
return 'unable to determine desired target language'
return goog_trans(args[1] + ' ' + args[2], sl, 'en')
return goog_trans(args[2], sl, tl)
return goog_trans(inp, '', 'en')
except IOError, e:
return e
languages = 'ja fr de ko ru zh'.split()
language_pairs = zip(languages[:-1], languages[1:])
def babel_gen(inp):
for language in languages:
inp = inp.encode('utf8')
trans = goog_trans(inp, 'en', language).encode('utf8')
inp = goog_trans(trans, language, 'en')
yield language, trans, inp
@hook.command
def babel(inp):
".babel <sentence> -- translates <sentence> through multiple languages"
try:
return list(babel_gen(inp))[-1][2]
except IOError, e:
return e
@hook.command
def babelext(inp):
".babelext <sentence> -- like .babel, but with more detailed output"
try:
babels = list(babel_gen(inp))
except IOError, e:
return e
out = u''
for lang, trans, text in babels:
out += '%s:"%s", ' % (lang, text.decode('utf8'))
out += 'en:"' + babels[-1][2].decode('utf8') + '"'
if len(out) > 300:
out = out[:150] + ' ... ' + out[-150:]
return out
lang_pairs = [
("no", "Norwegian"),
("it", "Italian"),
("ht", "Haitian Creole"),
("af", "Afrikaans"),
("sq", "Albanian"),
("ar", "Arabic"),
("hy", "Armenian"),
("az", "Azerbaijani"),
("eu", "Basque"),
("be", "Belarusian"),
("bg", "Bulgarian"),
("ca", "Catalan"),
("zh-CN zh", "Chinese"),
("hr", "Croatian"),
("cs", "Czech"),
("da", "Danish"),
("nl", "Dutch"),
("en", "English"),
("et", "Estonian"),
("tl", "Filipino"),
("fi", "Finnish"),
("fr", "French"),
("gl", "Galician"),
("ka", "Georgian"),
("de", "German"),
("el", "Greek"),
("ht", "Haitian Creole"),
("iw", "Hebrew"),
("hi", "Hindi"),
("hu", "Hungarian"),
("is", "Icelandic"),
("id", "Indonesian"),
("ga", "Irish"),
("it", "Italian"),
("ja jp jpn", "Japanese"),
("ko", "Korean"),
("lv", "Latvian"),
("lt", "Lithuanian"),
("mk", "Macedonian"),
("ms", "Malay"),
("mt", "Maltese"),
("no", "Norwegian"),
("fa", "Persian"),
("pl", "Polish"),
("pt", "Portuguese"),
("ro", "Romanian"),
("ru", "Russian"),
("sr", "Serbian"),
("sk", "Slovak"),
("sl", "Slovenian"),
("es", "Spanish"),
("sw", "Swahili"),
("sv", "Swedish"),
("th", "Thai"),
("tr", "Turkish"),
("uk", "Ukrainian"),
("ur", "Urdu"),
("vi", "Vietnamese"),
("cy", "Welsh"),
("yi", "Yiddish")
]
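# Example invocations (the sentences are placeholders):
#   .translate bonjour tout le monde     -- auto-detect source, translate to English
#   .translate fr bonjour tout le monde  -- French to English
#   .translate en fr good morning        -- English to French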

165
plugins/tvdb.py Normal file
View file

@ -0,0 +1,165 @@
"""
TV information, written by Lurchington 2010
modified by rmmh 2010
"""
import datetime
from urllib2 import URLError
from zipfile import ZipFile
from cStringIO import StringIO
from lxml import etree
from util import hook, http
base_url = "http://thetvdb.com/api/"
api_key = "469B73127CA0C411"
def get_zipped_xml(*args, **kwargs):
try:
path = kwargs.pop("path")
except KeyError:
raise KeyError("must specify a path for the zipped file to be read")
zip_buffer = StringIO(http.get(*args, **kwargs))
return etree.parse(ZipFile(zip_buffer, "r").open(path))
def get_episodes_for_series(seriesname):
res = {"error": None, "ended": False, "episodes": None, "name": None}
# http://thetvdb.com/wiki/index.php/API:GetSeries
try:
query = http.get_xml(base_url + 'GetSeries.php', seriesname=seriesname)
except URLError:
res["error"] = "error contacting thetvdb.com"
return res
series_id = query.xpath('//seriesid/text()')
if not series_id:
res["error"] = "unknown tv series (using www.thetvdb.com)"
return res
series_id = series_id[0]
try:
series = get_zipped_xml(base_url + '%s/series/%s/all/en.zip' %
(api_key, series_id), path="en.xml")
except URLError:
res["error"] = "error contacting thetvdb.com"
return res
series_name = series.xpath('//SeriesName/text()')[0]
if series.xpath('//Status/text()')[0] == 'Ended':
res["ended"] = True
res["episodes"] = series.xpath('//Episode')
res["name"] = series_name
return res
def get_episode_info(episode):
first_aired = episode.findtext("FirstAired")
try:
airdate = datetime.date(*map(int, first_aired.split('-')))
except (ValueError, TypeError):
return None
episode_num = "S%02dE%02d" % (int(episode.findtext("SeasonNumber")),
int(episode.findtext("EpisodeNumber")))
episode_name = episode.findtext("EpisodeName")
# in the event of an unannounced episode title, users either leave the
# field out (None) or fill it with TBA
if episode_name == "TBA":
episode_name = None
episode_desc = '%s' % episode_num
if episode_name:
episode_desc += ' - %s' % episode_name
return (first_aired, airdate, episode_desc)
@hook.command
@hook.command('tv')
def tv_next(inp):
".tv_next <series> -- get the next episode of <series>"
episodes = get_episodes_for_series(inp)
if episodes["error"]:
return episodes["error"]
series_name = episodes["name"]
ended = episodes["ended"]
episodes = episodes["episodes"]
if ended:
return "%s has ended." % series_name
next_eps = []
today = datetime.date.today()
for episode in reversed(episodes):
ep_info = get_episode_info(episode)
if ep_info is None:
continue
(first_aired, airdate, episode_desc) = ep_info
if airdate > today:
next_eps = ['%s (%s)' % (first_aired, episode_desc)]
elif airdate == today:
next_eps = ['Today (%s)' % episode_desc] + next_eps
else:
#we're iterating in reverse order with newest episodes last
#so, as soon as we're past today, break out of loop
break
if not next_eps:
return "there are no new episodes scheduled for %s" % series_name
if len(next_eps) == 1:
return "the next episode of %s airs %s" % (series_name, next_eps[0])
else:
next_eps = ', '.join(next_eps)
return "the next episodes of %s: %s" % (series_name, next_eps)
@hook.command
@hook.command('tv_prev')
def tv_last(inp):
".tv_last <series> -- gets the most recently aired episode of <series>"
episodes = get_episodes_for_series(inp)
if episodes["error"]:
return episodes["error"]
series_name = episodes["name"]
ended = episodes["ended"]
episodes = episodes["episodes"]
prev_ep = None
today = datetime.date.today()
for episode in reversed(episodes):
ep_info = get_episode_info(episode)
if ep_info is None:
continue
(first_aired, airdate, episode_desc) = ep_info
if airdate < today:
#iterating in reverse order, so the first episode encountered
#before today was the most recently aired
prev_ep = '%s (%s)' % (first_aired, episode_desc)
break
if not prev_ep:
return "there are no previously aired episodes for %s" % series_name
if ended:
return '%s ended. The last episode aired %s' % (series_name, prev_ep)
return "the last episode of %s aired %s" % (series_name, prev_ep)

134
plugins/twitter.py Normal file
View file

@ -0,0 +1,134 @@
"""
twitter.py: written by Scaevolus 2009
retrieves most recent tweets
"""
import random
import re
from time import strptime, strftime
from util import hook, http
def unescape_xml(string):
# unescape the 5 chars that might be escaped in xml
# gratuitously functional
# return reduce(lambda x, y: x.replace(*y), (string,
# zip('&gt; &lt; &apos; &quote; &amp'.split(), '> < \' " &'.split()))
# boring, normal
return string.replace('&gt;', '>').replace('&lt;', '<').replace('&apos;',
"'").replace('&quote;', '"').replace('&amp;', '&')
history = []
history_max_size = 250
@hook.command
def twitter(inp):
".twitter <user>/<user> <n>/<id>/#<hashtag>/@<user> -- gets last/<n>th "\
"tweet from <user>/gets tweet <id>/gets random tweet with #<hashtag>/"\
"gets replied tweet from @<user>"
def add_reply(reply_name, reply_id):
if len(history) == history_max_size:
history.pop()
history.insert(0, (reply_name, reply_id))
def find_reply(reply_name):
for name, id in history:
if name == reply_name:
return id if id != -1 else name
if inp[0] == '@':
reply_inp = find_reply(inp[1:])
if reply_inp == None:
return 'error: no replies to %s found' % inp
inp = reply_inp
url = 'http://twitter.com'
getting_nth = False
getting_id = False
searching_hashtag = False
time = 'status/created_at'
text = 'status/text'
reply_name = 'status/in_reply_to_screen_name'
reply_id = 'status/in_reply_to_status_id'
reply_user = 'status/in_reply_to_user_id'
if re.match(r'^\d+$', inp):
getting_id = True
url += '/statuses/show/%s.xml' % inp
screen_name = 'user/screen_name'
time = 'created_at'
text = 'text'
reply_name = 'in_reply_to_screen_name'
reply_id = 'in_reply_to_status_id'
reply_user = 'in_reply_to_user_id'
elif re.match(r'^\w{1,15}$', inp):
url += '/users/show/%s.xml' % inp
screen_name = 'screen_name'
elif re.match(r'^\w{1,15}\s+\d+$', inp):
getting_nth = True
name, num = inp.split()
if int(num) > 3200:
return 'error: only supports up to the 3200th tweet'
url += '/statuses/user_timeline/%s.xml?count=1&page=%s' % (name, num)
screen_name = 'status/user/screen_name'
elif re.match(r'^#\w+$', inp):
url = 'http://search.twitter.com/search.atom?q=%23' + inp[1:]
searching_hashtag = True
else:
return 'error: invalid request'
try:
tweet = http.get_xml(url)
except http.HTTPError, e:
errors = {400: 'bad request (ratelimited?)',
401: 'tweet is private',
403: 'tweet is private',
404: 'invalid user/id',
500: 'twitter is broken',
502: 'twitter is down ("getting upgraded")',
503: 'twitter is overloaded (lol, RoR)'}
if e.code == 404:
return 'error: invalid ' + ['username', 'tweet id'][getting_id]
if e.code in errors:
return 'error: ' + errors[e.code]
return 'error: unknown %s' % e.code
except http.URLError, e:
return 'error: timeout'
if searching_hashtag:
ns = '{http://www.w3.org/2005/Atom}'
tweets = tweet.findall(ns + 'entry/' + ns + 'id')
if not tweets:
return 'error: hashtag not found'
id = random.choice(tweets).text
id = id[id.rfind(':') + 1:]
return twitter(id)
if getting_nth:
if tweet.find('status') is None:
return 'error: user does not have that many tweets'
time = tweet.find(time)
if time is None:
return 'error: user has no tweets'
reply_name = tweet.find(reply_name).text
reply_id = tweet.find(reply_id).text
reply_user = tweet.find(reply_user).text
if reply_name is not None and (reply_id is not None or
reply_user is not None):
add_reply(reply_name, reply_id or -1)
time = strftime('%Y-%m-%d %H:%M:%S',
strptime(time.text,
'%a %b %d %H:%M:%S +0000 %Y'))
text = unescape_xml(tweet.find(text).text.replace('\n', ''))
screen_name = tweet.find(screen_name).text
return "%s %s: %s" % (time, screen_name, text)

80
plugins/urlhistory.py Normal file
View file

@ -0,0 +1,80 @@
import math
import re
import time
from util import hook, urlnorm, timesince
expiration_period = 60 * 60 * 24 # 1 day
ignored_urls = [urlnorm.normalize("http://google.com"),]
def db_init(db):
db.execute("create table if not exists urlhistory"
"(chan, url, nick, time)")
db.commit()
def insert_history(db, chan, url, nick):
now = time.time()
db.execute("insert into urlhistory(chan, url, nick, time) "
"values(?,?,?,?)", (chan, url, nick, time.time()))
db.commit()
def get_history(db, chan, url):
db.execute("delete from urlhistory where time < ?",
(time.time() - expiration_period,))
return db.execute("select nick, time from urlhistory where "
"chan=? and url=? order by time desc", (chan, url)).fetchall()
def nicklist(nicks):
nicks = sorted(dict(nicks), key=unicode.lower)
if len(nicks) <= 2:
return ' and '.join(nicks)
else:
return ', and '.join((', '.join(nicks[:-1]), nicks[-1]))
def format_reply(history):
if not history:
return
last_nick, recent_time = history[0]
last_time = timesince.timesince(recent_time)
if len(history) == 1:
return #"%s linked that %s ago." % (last_nick, last_time)
hour_span = math.ceil((time.time() - history[-1][1]) / 3600)
hour_span = '%.0f hours' % hour_span if hour_span > 1 else 'hour'
hlen = len(history)
ordinal = ["once", "twice", "%d times" % hlen][min(hlen, 3) - 1]
if len(dict(history)) == 1:
last = "last linked %s ago" % last_time
else:
last = "last linked by %s %s ago" % (last_nick, last_time)
return #"that url has been posted %s in the past %s by %s (%s)." % (ordinal,
@hook.regex(r'([a-zA-Z]+://|www\.)[^ ]+')
def urlinput(match, nick='', chan='', db=None, bot=None):
db_init(db)
url = urlnorm.normalize(match.group().encode('utf-8'))
if url not in ignored_urls:
url = url.decode('utf-8')
history = get_history(db, chan, url)
insert_history(db, chan, url, nick)
inp = match.string.lower()
for name in dict(history):
if name.lower() in inp: # person was probably quoting a line
return # that had a link. don't remind them.
if nick not in dict(history):
return format_reply(history)

31
plugins/urltools.py Normal file
View file

@ -0,0 +1,31 @@
from util import hook, http, urlnorm
import urllib
import re
import BeautifulSoup
ignored_urls = [urlnorm.normalize("http://google.com"),urlnorm.normalize("http://youtube.com")]
def parse(match):
url = urlnorm.normalize(match.encode('utf-8'))
if url not in ignored_urls:
url = url.decode('utf-8')
try:
soup = BeautifulSoup.BeautifulSoup(urllib.urlopen(url))
return soup.title.string
except:
return "Failed to parse URL"
#@hook.regex(r'^(?#Protocol)(?:(?:ht|f)tp(?:s?)\:\/\/|~\/|\/)?(?#Username:Password)(?:\w+:\w+@)?(?#Subdomains)(?:(?:[-\w]+\.)+(?#TopLevel Domains)(?:com|org|net|gov|mil|biz|info|mobi|name|aero|jobs|museum|travel|[a-z]{2}))(?#Port)(?::[\d]{1,5})?(?#Directories)(?:(?:(?:\/(?:[-\w~!$+|.,=]|%[a-f\d]{2})+)+|\/)+|\?|#)?(?#Query)(?:(?:\?(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)(?:&(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)*)*(?#Anchor)(?:#(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)?$')
@hook.command
def title(inp):
p = re.compile(r'^(?#Protocol)(?:(?:ht|f)tp(?:s?)\:\/\/|~\/|\/)?(?#Username:Password)(?:\w+:\w+@)?(?#Subdomains)(?:(?:[-\w]+\.)+(?#TopLevel Domains)(?:com|org|net|gov|mil|biz|info|mobi|name|aero|jobs|museum|travel|[a-z]{2}))(?#Port)(?::[\d]{1,5})?(?#Directories)(?:(?:(?:\/(?:[-\w~!$+|.,=]|%[a-f\d]{2})+)+|\/)+|\?|#)?(?#Query)(?:(?:\?(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)(?:&(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)*)*(?#Anchor)(?:#(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)?$')
m = p.match(inp)
if m:
return parse(inp)
else:
return 'Invalid URL!'

0
plugins/users.py Normal file
View file

0
plugins/util/__init__.py Normal file
View file

101
plugins/util/hook.py Normal file
View file

@ -0,0 +1,101 @@
import inspect
import re
def _hook_add(func, add, name=''):
if not hasattr(func, '_hook'):
func._hook = []
func._hook.append(add)
if not hasattr(func, '_filename'):
func._filename = func.func_code.co_filename
if not hasattr(func, '_args'):
argspec = inspect.getargspec(func)
if name:
n_args = len(argspec.args)
if argspec.defaults:
n_args -= len(argspec.defaults)
if argspec.keywords:
n_args -= 1
if argspec.varargs:
n_args -= 1
if n_args != 1:
err = '%ss must take 1 non-keyword argument (%s)' % (name,
func.__name__)
raise ValueError(err)
args = []
if argspec.defaults:
end = bool(argspec.keywords) + bool(argspec.varargs)
args.extend(argspec.args[-len(argspec.defaults):
end if end else None])
if argspec.keywords:
args.append(0) # means kwargs present
func._args = args
if not hasattr(func, '_thread'): # does function run in its own thread?
func._thread = False
def sieve(func):
if func.func_code.co_argcount != 5:
raise ValueError(
'sieves must take 5 arguments: (bot, input, func, type, args)')
_hook_add(func, ['sieve', (func,)])
return func
def command(arg=None, **kwargs):
args = {}
def command_wrapper(func):
args.setdefault('name', func.func_name)
_hook_add(func, ['command', (func, args)], 'command')
return func
if kwargs or not inspect.isfunction(arg):
if arg is not None:
args['name'] = arg
args.update(kwargs)
return command_wrapper
else:
return command_wrapper(arg)
def event(arg=None, **kwargs):
args = kwargs
def event_wrapper(func):
args['name'] = func.func_name
args.setdefault('events', ['*'])
_hook_add(func, ['event', (func, args)], 'event')
return func
if inspect.isfunction(arg):
return event_wrapper(arg)
else:
if arg is not None:
args['events'] = arg.split()
return event_wrapper
def singlethread(func):
func._thread = True
return func
def regex(regex, flags=0, **kwargs):
args = kwargs
def regex_wrapper(func):
args['name'] = func.func_name
args['regex'] = regex
args['re'] = re.compile(regex, flags)
_hook_add(func, ['regex', (func, args)], 'regex')
return func
if inspect.isfunction(regex):
raise ValueError("regex decorators require a regex to match against")
else:
return regex_wrapper
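hook.py is the decorator API every plugin above relies on, so a tiny made-up plugin is the quickest way to see the contract: one positional argument, optional keyword arguments filled in by the bot, and the docstring doubling as help text. Illustrative sketch only (the command names here do not exist in this commit):
# example_plugin.py -- illustrative use of the hook decorators
from util import hook

@hook.command
def echo(inp):
    ".echo <text> -- repeats <text> back"
    return inp

@hook.command('hi')                # explicit command name
def greet(inp, nick=None):
    ".hi -- says hello to whoever asked"
    return "hello, %s!" % nick

@hook.regex(r'(?i)\bcloudbot\b')   # fires on matching chat lines
def mentioned(match, chan=''):
    return "someone mentioned me in %s" % chan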

103
plugins/util/http.py Normal file
View file

@ -0,0 +1,103 @@
# convenience wrapper for urllib2 & friends
import cookielib
import json
import urllib
import urllib2
import urlparse
from urllib import quote, quote_plus as _quote_plus
from urllib2 import HTTPError, URLError
from BeautifulSoup import BeautifulSoup
from lxml import etree, html
ua_skybot = 'Cloudbot/3.4 http://github.com/lukeroge/cloudbot'
ua_firefox = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.6) ' \
'Gecko/20070725 Firefox/2.0.0.6'
ua_internetexplorer = 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)'
jar = cookielib.CookieJar()
def get(*args, **kwargs):
return open(*args, **kwargs).read()
def get_html(*args, **kwargs):
return html.fromstring(get(*args, **kwargs))
def get_soup(*args, **kwargs):
return BeautifulSoup(get(*args, **kwargs))
def get_xml(*args, **kwargs):
return etree.fromstring(get(*args, **kwargs))
def get_json(*args, **kwargs):
return json.loads(get(*args, **kwargs))
def open(url, query_params=None, user_agent=None, post_data=None,
get_method=None, cookies=False, **kwargs):
if query_params is None:
query_params = {}
if user_agent is None:
user_agent = ua_skybot
query_params.update(kwargs)
url = prepare_url(url, query_params)
request = urllib2.Request(url, post_data)
if get_method is not None:
request.get_method = lambda: get_method
request.add_header('User-Agent', user_agent)
if cookies:
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(jar))
else:
opener = urllib2.build_opener()
return opener.open(request)
def prepare_url(url, queries):
if queries:
scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
query = dict(urlparse.parse_qsl(query))
query.update(queries)
query = urllib.urlencode(dict((to_utf8(key), to_utf8(value))
for key, value in query.iteritems()))
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
return url
def to_utf8(s):
if isinstance(s, unicode):
return s.encode('utf8', 'ignore')
else:
return str(s)
def quote_plus(s):
return _quote_plus(to_utf8(s))
def unescape(s):
if not s.strip():
return s
return html.fromstring(s).text_content()
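For reference, how the wrapper above is typically called from plugins: extra keyword arguments become query-string parameters, while user_agent, post_data and cookies are handled by open() itself. The URLs here are placeholders:
# illustrative calls against http.py
from util import http

# GET http://example.com/api?q=cloudbot&format=json, parsed as JSON
data = http.get_json('http://example.com/api', q='cloudbot', format='json')

# parsed lxml document, with a browser user agent
page = http.get_html('http://example.com/', user_agent=http.ua_firefox)

# raw response object, keeping cookies in the shared jar
resp = http.open('http://example.com/', cookies=True)
print resp.info().get('content-type')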

54
plugins/util/misc.py Normal file
View file

@ -0,0 +1,54 @@
from htmlentitydefs import name2codepoint
from time import time as unix_time
from HTMLParser import HTMLParser
from datetime import datetime
import tempfile
import logging as log
import errno
import re
import sys
import os
class HTMLStripper(HTMLParser):
def __init__(self, data):
HTMLParser.__init__(self)
self._stripped = []
self.feed(data)
def handle_starttag(self, tag, attrs):
if tag.lower() == 'br':
self._stripped.append('\n')
def handle_charref(self, name):
try:
if name.lower().startswith('x'):
char = int(name[1:], 16)
else:
char = int(name)
self._stripped.append(unichr(char))
except Exception, error:
log.warn('invalid entity: %s' % error)
def handle_entityref(self, name):
try:
char = unichr(name2codepoint[name])
except Exception, error:
log.warn('unknown entity: %s' % error)
char = u'&%s;' % name
self._stripped.append(char)
def handle_data(self, data):
self._stripped.append(data)
@property
def stripped(self):
return ''.join(self._stripped)
def superscript(text):
if isinstance(text, str):
text = text.decode('utf-8')
return text.translate(SUPER_MAP)
def strip_html(data):
return HTMLStripper(data).stripped
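For reference, what the stripper above produces on a small made-up snippet (<br> becomes a newline, entities are decoded):
# illustrative only
from util.misc import strip_html

print strip_html('R&amp;D<br>5 &gt; 3')
# prints:
# R&D
# 5 > 3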

219
plugins/util/molecular.py Normal file
View file

@ -0,0 +1,219 @@
#!/usr/bin/env python
#
# molecular.py
# Copyright (c) 2001, Chris Gonnerman
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of the author nor the names of any contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""molecular.py -- molecular (ngenoid) name generator
This module knows how to generate "random" names for RPG characters.
It uses the same method as the "ngen" name generator by Kimmo Kulovesi,
and in fact it can use the same name files. molecular.py knows how
to merge multiple tables also, which can be handy...
If run as a command-line program, use the following options:
-r namefile -- read the given name file and add to the
current name table.
nnn -- generate nnn (a number) names and print
on standard output.
To generate names from a name file:
python molecular.py -r file 10
As a module (to be imported) you get the following classes and functions:
NameFile (class) -- a file wrapper with a disabled close() method,
used internally and probably not useful otherwise.
nameopen (function) -- opens a file; takes filename and mode options,
searches the default name file directory if not
found in current directory, handles "-" filenames,
and uses NameFile to disable closing of sys.stdin/
sys.stdout.
Molecule (class) -- the meat of the matter. A Molecule instance has
the following methods:
.load(file) -- loads a name file,
which may be a file-like
object with a .readline()
method or a filename as a
string.
.name() -- generate one name and
return it.
"""
__version__ = "1.0"
import string, re, sys, random
NAMEDIR = "/home/ircbot/bot/plugins/util/names"
NAMESECTIONS = [ "inf", "first", "mid", "final", "notes", "end" ]
class NameFile:
__file_attributes = ('closed','mode','name','softspace')
def __init__(self, file):
self.fd = file
def close(self):
pass
def flush(self):
return self.fd.flush()
def isatty(self):
return self.fd.isatty()
def fileno(self):
return self.fd.fileno()
def read(self, *args):
return apply(self.fd.read, args)
def readline(self, *args):
return apply(self.fd.readline, args)
def readlines(self, *args):
return apply(self.fd.readlines, args)
def seek(self, *args):
return apply(self.fd.seek, args)
def tell(self):
return self.fd.tell()
def write(self, str):
return self.fd.write(str)
def writelines(self, list):
return self.fd.writelines(list)
def __repr__(self):
return repr(self.fd)
def __getattr__(self, name):
if name in self.__file_attributes:
return getattr(self.fd, name)
else:
return self.__dict__[name]
def __setattr__(self, name, value):
if name in self.__file_attributes:
setattr(self.fd, name, value)
else:
self.__dict__[name] = value
def __cmp__(self, file):
"""I'm not sure what the correct behavior is, and therefore
this implementation is just a guess."""
if type(file) == type(self.fd):
return cmp(self.fd, file)
else:
return cmp(self.fd, file.fd)
class NameReader:
def __init__(self, file):
self.file = file
self.line = ""
def next(self):
self.line = self.file.readline()
return self.line
def close(self):
return self.file.close()
def safeopen(filename, mode):
try:
return open(filename, mode)
except IOError:
return None
def nameopen(filename, mode):
if filename == "-":
if "r" in mode:
return NameFile(sys.stdin)
else:
return NameFile(sys.stdout)
fp = safeopen(filename, mode)
if fp is None:
fp = safeopen(filename + ".nam", mode)
if "r" in mode and fp is None:
fp = safeopen(NAMEDIR + "/" + filename, mode)
# last call is open() instead of safeopen() to finally raise
# the exception if we just can't find the file.
if fp is None:
fp = open(NAMEDIR + "/" + filename + ".nam", mode)
return fp
class Molecule:
def __init__(self):
self.nametbl = {}
for i in NAMESECTIONS:
self.nametbl[i] = []
self.nametbl[""] = []
self.cursection = self.nametbl[""]
def load(self, fp):
if type(fp) is type(""):
fp = nameopen(fp, "r")
else:
fp = NameFile(fp)
rdr = NameReader(fp)
while rdr.next():
line = rdr.line[:-1]
if len(line) > 0 and line[0] == '[' and line[-1] == ']':
line = string.strip(line)[1:-1]
if not self.nametbl.has_key(line):
self.nametbl[line] = []
self.cursection = self.nametbl[line]
else:
self.cursection.append(line)
fp.close()
def name(self):
n = []
if len(self.nametbl["first"]) > 0:
n.append(random.choice(self.nametbl["first"]))
if len(self.nametbl["mid"]) > 0:
n.append(random.choice(self.nametbl["mid"]))
if len(self.nametbl["final"]) > 0:
n.append(random.choice(self.nametbl["final"]))
return string.join(n, "")
if __name__ == "__main__":
if len(sys.argv) <= 1:
sys.stderr.write( \
"Usage: molecular.py [ -r file ] [ nn ]\n")
sys.exit(0)
name = Molecule()
i = 1
while i < len(sys.argv):
arg = sys.argv[i]
if arg == "-r":
i += 1
name.load(sys.argv[i])
else:
n = int(sys.argv[i])
lst = []
for i in range(n):
print name.name()
i += 1
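Beyond the command-line mode above, the module docstring describes direct use of the Molecule class. A minimal sketch (the name-file name is hypothetical; nameopen() resolves it against NAMEDIR and adds a .nam suffix if needed):
# illustrative: generate a few names from a name file
from util import molecular

gen = molecular.Molecule()
gen.load("elven")          # hypothetical file, looked up via nameopen()
for _ in range(5):
    print gen.name()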

102
plugins/util/timesince.py Normal file
View file

@ -0,0 +1,102 @@
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Django nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
#ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
#ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import datetime
def timesince(d, now=None):
"""
Takes two datetime objects and returns the time between d and now
as a nicely formatted string, e.g. "10 minutes". If d occurs after now,
then "0 minutes" is returned.
Units used are years, months, weeks, days, hours, and minutes.
Seconds and microseconds are ignored. Up to two adjacent units will be
displayed. For example, "2 weeks, 3 days" and "1 year, 3 months" are
possible outputs, but "2 weeks, 3 hours" and "1 year, 5 days" are not.
Adapted from http://blog.natbat.co.uk/archive/2003/Jun/14/time_since
"""
chunks = (
(60 * 60 * 24 * 365, ('year', 'years')),
(60 * 60 * 24 * 30, ('month', 'months')),
(60 * 60 * 24 * 7, ('week', 'weeks')),
(60 * 60 * 24, ('day', 'days')),
(60 * 60, ('hour', 'hours')),
(60, ('minute', 'minutes'))
)
# Convert int or float (unix epoch) to datetime.datetime for comparison
if isinstance(d, int) or isinstance(d, float):
d = datetime.datetime.fromtimestamp(d)
# Convert datetime.date to datetime.datetime for comparison.
if not isinstance(d, datetime.datetime):
d = datetime.datetime(d.year, d.month, d.day)
if now and not isinstance(now, datetime.datetime):
now = datetime.datetime(now.year, now.month, now.day)
if not now:
now = datetime.datetime.now()
# ignore microsecond part of 'd' since we removed it from 'now'
delta = now - (d - datetime.timedelta(0, 0, d.microsecond))
since = delta.days * 24 * 60 * 60 + delta.seconds
if since <= 0:
# d is in the future compared to now, stop processing.
return u'0 ' + 'minutes'
for i, (seconds, name) in enumerate(chunks):
count = since // seconds
if count != 0:
break
if count == 1:
s = '%(number)d %(type)s' % {'number': count, 'type': name[0]}
else:
s = '%(number)d %(type)s' % {'number': count, 'type': name[1]}
if i + 1 < len(chunks):
# Now get the second item
seconds2, name2 = chunks[i + 1]
count2 = (since - (seconds * count)) // seconds2
if count2 != 0:
if count2 == 1:
s += ', %d %s' % (count2, name2[0])
else:
s += ', %d %s' % (count2, name2[1])
return s
def timeuntil(d, now=None):
"""
Like timesince, but returns a string measuring the time until
the given time.
"""
if not now:
now = datetime.datetime.now()
return timesince(now, d)
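Two worked calls matching the docstring's description; the dates are arbitrary, and the unix-timestamp form is the one urlhistory.py uses:
import datetime
import time
from util import timesince

then = datetime.datetime(2011, 11, 3, 10, 0)
now = datetime.datetime(2011, 11, 20, 22, 0)
print timesince.timesince(then, now)         # -> "2 weeks, 3 days"

print timesince.timesince(time.time() - 90)  # -> "1 minute"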

133
plugins/util/urlnorm.py Normal file
View file

@ -0,0 +1,133 @@
"""
URI Normalization function:
* Always provide the URI scheme in lowercase characters.
* Always provide the host, if any, in lowercase characters.
* Only perform percent-encoding where it is essential.
* Always use uppercase A-through-F characters when percent-encoding.
* Prevent dot-segments appearing in non-relative URI paths.
* For schemes that define a default authority, use an empty authority if the
default is desired.
* For schemes that define an empty path to be equivalent to a path of "/",
use "/".
* For schemes that define a port, use an empty port if the default is desired
* All portions of the URI must be utf-8 encoded NFC from Unicode strings
implements:
http://gbiv.com/protocols/uri/rev-2002/rfc2396bis.html#canonical-form
http://www.intertwingly.net/wiki/pie/PaceCanonicalIds
inspired by:
Tony J. Ibbs, http://starship.python.net/crew/tibs/python/tji_url.py
Mark Nottingham, http://www.mnot.net/python/urlnorm.py
"""
__license__ = "Python"
import re
import unicodedata
import urlparse
from urllib import quote, unquote
default_port = {
'http': 80,
}
class Normalizer(object):
def __init__(self, regex, normalize_func):
self.regex = regex
self.normalize = normalize_func
normalizers = ( Normalizer( re.compile(r'(?:https?://)?(?:[a-zA-Z0-9\-]+\.)?(?:amazon|amzn){1}\.(?P<tld>[a-zA-Z\.]{2,})\/(gp/(?:product|offer-listing|customer-media/product-gallery)/|exec/obidos/tg/detail/-/|o/ASIN/|dp/|(?:[A-Za-z0-9\-]+)/dp/)?(?P<ASIN>[0-9A-Za-z]{10})'),
lambda m: r'http://amazon.%s/dp/%s' % (m.group('tld'), m.group('ASIN'))),
Normalizer( re.compile(r'.*waffleimages\.com.*/([0-9a-fA-F]{40})'),
lambda m: r'http://img.waffleimages.com/%s' % m.group(1) ),
Normalizer( re.compile(r'(?:youtube.*?(?:v=|/v/)|youtu\.be/|yooouuutuuube.*?id=)([-_a-z0-9]+)'),
lambda m: r'http://youtube.com/watch?v=%s' % m.group(1) ),
)
def normalize(url):
"""Normalize a URL."""
scheme, auth, path, query, fragment = urlparse.urlsplit(url.strip())
userinfo, host, port = re.search('([^@]*@)?([^:]*):?(.*)', auth).groups()
# Always provide the URI scheme in lowercase characters.
scheme = scheme.lower()
# Always provide the host, if any, in lowercase characters.
host = host.lower()
if host and host[-1] == '.':
host = host[:-1]
if host and host.startswith("www."):
if not scheme:
scheme = "http"
host = host[4:]
elif path and path.startswith("www."):
if not scheme:
scheme = "http"
path = path[4:]
# Only perform percent-encoding where it is essential.
# Always use uppercase A-through-F characters when percent-encoding.
# All portions of the URI must be utf-8 encoded NFC from Unicode strings
def clean(string):
string = unicode(unquote(string), 'utf-8', 'replace')
return unicodedata.normalize('NFC', string).encode('utf-8')
path = quote(clean(path), "~:/?#[]@!$&'()*+,;=")
fragment = quote(clean(fragment), "~")
# note care must be taken to only encode & and = characters as values
query = "&".join(["=".join([quote(clean(t), "~:/?#[]@!$'()*+,;=")
for t in q.split("=", 1)]) for q in query.split("&")])
# Prevent dot-segments appearing in non-relative URI paths.
if scheme in ["", "http", "https", "ftp", "file"]:
output = []
for input in path.split('/'):
if input == "":
if not output:
output.append(input)
elif input == ".":
pass
elif input == "..":
if len(output) > 1:
output.pop()
else:
output.append(input)
if input in ["", ".", ".."]:
output.append("")
path = '/'.join(output)
# For schemes that define a default authority, use an empty authority if
# the default is desired.
if userinfo in ["@", ":@"]:
userinfo = ""
# For schemes that define an empty path to be equivalent to a path of "/",
# use "/".
if path == "" and scheme in ["http", "https", "ftp", "file"]:
path = "/"
# For schemes that define a port, use an empty port if the default is
# desired
if port and scheme in default_port.keys():
if port.isdigit():
port = str(int(port))
if int(port) == default_port[scheme]:
port = ''
# Put it all back together again
auth = (userinfo or "") + host
if port:
auth += ":" + port
if url.endswith("#") and query == "" and fragment == "":
path += "#"
normal_url = urlparse.urlunsplit((scheme, auth, path, query,
fragment)).replace("http:///", "http://")
for norm in normalizers:
m = norm.regex.match(normal_url)
if m:
return norm.normalize(m)
return normal_url
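A few illustrative inputs and the results the rules above yield (URLs are arbitrary examples):
from util import urlnorm

print urlnorm.normalize("HTTP://Example.COM:80/")             # http://example.com/
print urlnorm.normalize("http://www.example.com/a/./b/../c")  # http://example.com/a/c
print urlnorm.normalize("http://google.com")                  # http://google.com/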

25
plugins/validate.py Normal file
View file

@ -0,0 +1,25 @@
'''
Runs a given url through the w3c validator
by Vladi
'''
from util import hook, http
@hook.command
def validate(inp):
".validate <url> -- runs url through w3c markup validator"
if not inp.startswith('http://'):
inp = 'http://' + inp
url = 'http://validator.w3.org/check?uri=' + http.quote_plus(inp)
info = dict(http.open(url).info())
status = info['x-w3c-validator-status'].lower()
if status in ("valid", "invalid"):
errorcount = info['x-w3c-validator-errors']
warningcount = info['x-w3c-validator-warnings']
return "%s was found to be %s with %s errors and %s warnings." \
" see: %s" % (inp, status, errorcount, warningcount, url)

14
plugins/vimeo.py Normal file
View file

@ -0,0 +1,14 @@
from util import hook, http
@hook.regex(r'vimeo.com/([0-9]+)')
def vimeo_url(match):
info = http.get_json('http://vimeo.com/api/v2/video/%s.json'
% match.group(1))
if info:
return ("\x02%(title)s\x02 - length \x02%(duration)ss\x02 - "
"\x02%(stats_number_of_likes)s\x02 likes - "
"\x02%(stats_number_of_plays)s\x02 plays - "
"\x02%(user_name)s\x02 on \x02%(upload_date)s\x02"
% info[0])

45
plugins/weather.py Normal file
View file

@ -0,0 +1,45 @@
"weather, thanks to google"
from util import hook, http
@hook.command(autohelp=False)
def weather(inp, nick='', server='', reply=None, db=None, notice=None):
".weather <location> [dontsave] -- gets weather data from Google"
loc = inp
dontsave = loc.endswith(" dontsave")
if dontsave:
loc = loc[:-9].strip().lower()
db.execute("create table if not exists weather(nick primary key, loc)")
if not loc: # blank line
loc = db.execute("select loc from weather where nick=lower(?)",
(nick,)).fetchone()
if not loc:
notice(weather.__doc__)
return
loc = loc[0]
w = http.get_xml('http://www.google.com/ig/api', weather=loc)
w = w.find('weather')
if w.find('problem_cause') is not None:
notice("Couldn't fetch weather data for '%s', try using a zip or " \
"postal code." % inp)
return
info = dict((e.tag, e.get('data')) for e in w.find('current_conditions'))
info['city'] = w.find('forecast_information/city').get('data')
info['high'] = w.find('forecast_conditions/high').get('data')
info['low'] = w.find('forecast_conditions/low').get('data')
reply('%(city)s: %(condition)s, %(temp_f)sF/%(temp_c)sC (H:%(high)sF'\
', L:%(low)sF), %(humidity)s, %(wind_condition)s.' % info)
if inp and not dontsave:
db.execute("insert or replace into weather(nick, loc) values (?,?)",
(nick.lower(), loc))
db.commit()

51
plugins/wikipedia.py Normal file
View file

@ -0,0 +1,51 @@
'''Searches wikipedia and returns first sentence of article
Scaevolus 2009'''
import re
from util import hook, http
api_prefix = "http://en.wikipedia.org/w/api.php"
search_url = api_prefix + "?action=opensearch&format=xml"
paren_re = re.compile('\s*\(.*\)$')
@hook.command('w')
@hook.command
def wiki(inp):
'''.w/.wiki <phrase> -- gets first sentence of wikipedia ''' \
'''article on <phrase>'''
x = http.get_xml(search_url, search=inp)
ns = '{http://opensearch.org/searchsuggest2}'
items = x.findall(ns + 'Section/' + ns + 'Item')
if items == []:
if x.find('error') is not None:
return 'error: %(code)s: %(info)s' % x.find('error').attrib
else:
return 'no results found'
def extract(item):
return [item.find(ns + x).text for x in
('Text', 'Description', 'Url')]
title, desc, url = extract(items[0])
if 'may refer to' in desc:
title, desc, url = extract(items[1])
title = paren_re.sub('', title)
if title.lower() not in desc.lower():
desc = title + desc
desc = re.sub('\s+', ' ', desc).strip() # remove excess spaces
if len(desc) > 300:
desc = desc[:300] + '...'
return '%s -- %s' % (desc, http.quote(url, ':/'))

56
plugins/wolframalpha.py Normal file
View file

@ -0,0 +1,56 @@
import re
from util import hook, http
@hook.command('wa')
@hook.command
def wolframalpha(inp):
".wa/.wolframalpha <query> -- scrapes Wolfram Alpha's" \
" results for <query>"
url = "http://www.wolframalpha.com/input/?asynchronous=false"
h = http.get_html(url, i=inp)
pods = h.xpath("//div[@class='pod ']")
pod_texts = []
for pod in pods:
heading = pod.find('h2')
if heading is not None:
heading = heading.text_content().strip()
if heading.startswith('Input'):
continue
else:
continue
results = []
for alt in pod.xpath('div/div[@class="output pnt"]/img/@alt'):
alt = alt.strip().replace('\\n', '; ')
alt = re.sub(r'\s+', ' ', alt)
if alt:
results.append(alt)
if results:
pod_texts.append(heading + ' ' + '|'.join(results))
ret = '. '.join(pod_texts)
if not pod_texts:
return 'no results'
ret = re.sub(r'\\(.)', r'\1', ret)
def unicode_sub(match):
return unichr(int(match.group(1), 16))
ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret)
if len(ret) > 430:
ret = ret[:ret.rfind(' ', 0, 430)]
ret = re.sub(r'\W+$', '', ret) + '...'
if not ret:
return 'no result'
return ret

19
plugins/word.py Normal file
View file

@ -0,0 +1,19 @@
import re
from util import hook, http
from BeautifulSoup import BeautifulSoup
@hook.command(autohelp=False)
def wordu(inp, say=False, nick=False):
".word -- gets the word of the day
return "true"
page = http.get('http://merriam-webster.com/word-of-the-day')
soup = BeautifulSoup(page)
word = soup.find('strong', {'class' : 'main_entry_word'})
function = soup.find('p', {'class' : 'word_function'})
#definitions = re.findall(r'<span class="ssens"><strong>:</strong>'
# r' *([^<]+)</span>', content)
say("(%s) The word of the day is: \x02%s\x02 (%s)" % (nick, word, function))

90
plugins/youtube.py Normal file
View file

@ -0,0 +1,90 @@
import locale
import re
import time
from util import hook, http
locale.setlocale(locale.LC_ALL, '')
youtube_re = (r'(?:youtube.*?(?:v=|/v/)|youtu\.be/|yooouuutuuube.*?id=)'
'([-_a-z0-9]+)', re.I)
base_url = 'http://gdata.youtube.com/feeds/api/'
url = base_url + 'videos/%s?v=2&alt=jsonc'
search_api_url = base_url + 'videos?v=2&alt=jsonc&max-results=1'
video_url = "http://youtube.com/watch?v=%s"
def get_video_description(vid_id):
j = http.get_json(url % vid_id)
if j.get('error'):
return
j = j['data']
out = {}
out["title"] = '%s' % j['title']
if not j.get('duration'):
return out
length = j['duration']
ti = ""
if length / 3600: # > 1 hour
ti += '%dh ' % (length / 3600)
if length / 60:
ti += '%dm ' % (length / 60 % 60)
out["length"] = ti
#out += "%ds\x02" % (length % 60)
if 'ratingCount' in j and 'likeCount' in j:
out["likes"] = int(j['likeCount'])
out["dislikes"] = int(j['ratingCount']) - int(j['likeCount'])
if 'rating' in j:
out["rating"] = (j['rating'])
if 'viewCount' in j:
out["views"] = j['viewCount']
upload_time = time.strptime(j['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z")
out["uploadtime"] = (j['uploader'])
# title. uploader. length. upload date.
give = '\x02' + j[u'title'] + '\x02'
give += " - Length: " + GetInHMS(j['duration'])
give += " - "+ j[u'uploaded'][:10] + " by " + j[u'uploader']
return give
def GetInHMS(seconds):
hours = seconds / 3600
seconds -= 3600*hours
minutes = seconds / 60
seconds -= 60*minutes
if hours == 0:
return "%02d:%02d" % (minutes, seconds)
return "%02d:%02d:%02d" % (hours, minutes, seconds)
@hook.regex(*youtube_re)
def youtube_url(match):
return get_video_description(match.group(1))
@hook.command('y')
@hook.command
def youtube(inp):
'.youtube <query> -- returns the first YouTube search result for <query>'
j = http.get_json(search_api_url, q=inp)
if 'error' in j:
return 'error performing search'
if j['data']['totalItems'] == 0:
return 'no results found'
vid_id = j['data']['items'][0]['id']
return get_video_description(vid_id) + " - " + video_url % vid_id