Merge branch 'develop' into refresh

Conflicts:
	disabled_stuff/mygengo_translate.py
	plugins/attacks.py
	plugins/core_sieve.py
	plugins/fortune.py
	plugins/geoip.py
	plugins/mark.py
Luke Rogers 2014-02-14 16:41:51 +13:00
commit bf9468a4aa
82 changed files with 530 additions and 380 deletions

View file

@ -1,79 +1,79 @@
{
"templates":[
"rips off {user}'s {limbs} and leaves them to die.",
"grabs {user}'s head and rips it clean off their body.",
"grabs a {gun} and riddles {user}'s body with bullets.",
"gags and ties {user} then throws them off a {tall_thing}.",
"crushes {user} with a huge spiked {spiked_thing}.",
"glares at {user} until they die of boredom.",
"stabs {user} in the heart a few times with a {weapon_stab}.",
"rams a {weapon_explosive} up {user}'s ass and lets off a few rounds.",
"crushes {user}'s skull in with a {weapon_crush}.",
"unleashes the armies of Isengard on {user}.",
"gags and ties {user} then throws them off a {tall_thing} to their death.",
"reaches out and punches right through {user}'s chest.",
"slices {user}'s limbs off with a {weapon_slice}.",
"throws {user} to Cthulu and watches them get ripped to shreds.",
"feeds {user} to an owlbear who then proceeds to maul them violently.",
"turns {user} into a snail and covers then in salt.",
"snacks on {user}'s dismembered body.",
"stuffs {bomb} up {user}'s ass and waits for it to go off.",
"puts {user} into a sack, throws the sack in the river, and hurls the river into space.",
"goes bowling with {user}'s bloody disembodied head.",
"sends {user} to /dev/null!",
"feeds {user} coke and mentos till they violently explode."
],
"parts": {
"gun":[
"AK47",
"machine gun",
"automatic pistol",
"Uzi"
],
"limbs": [
"legs",
"arms",
"limbs"
],
"weapon_stab": [
"knife",
"shard of glass",
"sword blade",
"butchers knife",
"corkscrew"
],
"weapon_slice": [
"sharpened katana",
"chainsaw",
"polished axe"
],
"weapon_crush": [
"spiked mace",
"baseball bat",
"wooden club",
"massive steel ball",
"heavy iron rod"
],
"weapon_explosive": [
"rocket launcher",
"grenade launcher",
"napalm launcher"
],
"tall_thing": [
"bridge",
"tall building",
"cliff",
"mountain"
],
"spiked_thing": [
"boulder",
"rock",
"barrel of rocks"
],
"bomb": [
"a bomb",
"some TNT",
"a bunch of C4"
]
}
"templates": [
"rips off {user}'s {limbs} and leaves them to die.",
"grabs {user}'s head and rips it clean off their body.",
"grabs a {gun} and riddles {user}'s body with bullets.",
"gags and ties {user} then throws them off a {tall_thing}.",
"crushes {user} with a huge spiked {spiked_thing}.",
"glares at {user} until they die of boredom.",
"stabs {user} in the heart a few times with a {weapon_stab}.",
"rams a {weapon_explosive} up {user}'s ass and lets off a few rounds.",
"crushes {user}'s skull in with a {weapon_crush}.",
"unleashes the armies of Isengard on {user}.",
"gags and ties {user} then throws them off a {tall_thing} to their death.",
"reaches out and punches right through {user}'s chest.",
"slices {user}'s limbs off with a {weapon_slice}.",
"throws {user} to Cthulu and watches them get ripped to shreds.",
"feeds {user} to an owlbear who then proceeds to maul them violently.",
"turns {user} into a snail and covers then in salt.",
"snacks on {user}'s dismembered body.",
"stuffs {bomb} up {user}'s ass and waits for it to go off.",
"puts {user} into a sack, throws the sack in the river, and hurls the river into space.",
"goes bowling with {user}'s bloody disembodied head.",
"sends {user} to /dev/null!",
"feeds {user} coke and mentos till they violently explode."
],
"parts": {
"gun": [
"AK47",
"machine gun",
"automatic pistol",
"Uzi"
],
"limbs": [
"legs",
"arms",
"limbs"
],
"weapon_stab": [
"knife",
"shard of glass",
"sword blade",
"butchers knife",
"corkscrew"
],
"weapon_slice": [
"sharpened katana",
"chainsaw",
"polished axe"
],
"weapon_crush": [
"spiked mace",
"baseball bat",
"wooden club",
"massive steel ball",
"heavy iron rod"
],
"weapon_explosive": [
"rocket launcher",
"grenade launcher",
"napalm launcher"
],
"tall_thing": [
"bridge",
"tall building",
"cliff",
"mountain"
],
"spiked_thing": [
"boulder",
"rock",
"barrel of rocks"
],
"bomb": [
"a bomb",
"some TNT",
"a bunch of C4"
]
}
}
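
A rough, hypothetical sketch (not the bot's own code) of how a template file like the one above can be expanded: pick a random template, then pick one option for every {placeholder} group under "parts". The repository does this through util.textgen, whose exact API is not shown here, and the file path below is an assumption.

import json
import random

def render_kill(path, user):
    # load the templates/parts structure shown above (path is assumed)
    with open(path) as f:
        data = json.load(f)
    template = random.choice(data["templates"])
    # choose one value for each placeholder group, plus the target user
    values = {name: random.choice(options) for name, options in data["parts"].items()}
    values["user"] = user
    return template.format(**values)

# e.g. render_kill("data/kill.json", "someuser")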

View file

@ -1,4 +1,3 @@
from util import hook
import os
import sys
import re
@ -6,6 +5,8 @@ import json
import time
import subprocess
from util import hook
@hook.command(autohelp=False, permissions=["permissions_users"])
def permissions(inp, bot=None, notice=None):

View file

@ -1,6 +1,8 @@
from util import hook
import random
from util import hook
with open("data/larts.txt") as f:
larts = [line.strip() for line in f.readlines()
if not line.startswith("//")]

View file

@ -1,5 +1,5 @@
'''brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py'''
"""brainfuck interpreter adapted from (public domain) code at
http://brainfuck.sourceforge.net/brain.py"""
import re
import random

View file

@ -1,6 +1,7 @@
from util import hook
import random
from util import hook
@hook.command(autohelp=False)
def coin(inp, action=None):

View file

@ -1,7 +1,9 @@
import time
from util import hook
# CTCP responses
@hook.regex(r'^\x01VERSION\x01$')
def ctcp_version(inp, notice=None):

View file

@ -4,6 +4,7 @@ import re
from util import hook
socket.setdefaulttimeout(10)
nick_re = re.compile(":(.+?)!")

View file

@ -4,6 +4,7 @@ TOKENS = 10
RESTORE_RATE = 2
MESSAGE_COST = 5
buckets = {}
@hook.sieve
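
The constants above (TOKENS, RESTORE_RATE, MESSAGE_COST, plus the buckets dict) suggest a token-bucket rate limiter. A minimal standalone sketch of that idea, assuming a per-user bucket and an allow_message helper that the real sieve does not necessarily have:

import time

TOKENS = 10        # bucket capacity
RESTORE_RATE = 2   # tokens restored per second
MESSAGE_COST = 5   # tokens spent per message
buckets = {}       # user -> (tokens, last seen timestamp)

def allow_message(user):
    now = time.time()
    tokens, last = buckets.get(user, (TOKENS, now))
    # refill based on elapsed time, capped at the bucket size
    tokens = min(TOKENS, tokens + (now - last) * RESTORE_RATE)
    allowed = tokens >= MESSAGE_COST
    buckets[user] = (tokens - MESSAGE_COST if allowed else tokens, now)
    return allowed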

View file

@ -5,14 +5,17 @@ from util import http, hook
exchanges = {
"blockchain": {
"api_url": "https://blockchain.info/ticker",
"func": lambda data: u"Blockchain // Buy: \x0307${:,.2f}\x0f - Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], \
data["USD"]["sell"])
"func": lambda data: u"Blockchain // Buy: \x0307${:,.2f}\x0f -"
u" Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], data["USD"]["sell"])
},
"mtgox": {
"api_url": "https://mtgox.com/api/1/BTCUSD/ticker",
"func": lambda data: u"MtGox // Current: \x0307{}\x0f - High: \x0307{}\x0f - Low: \x0307{}\x0f - Best Ask: \x0307{}\x0f - Volume: {}".format(data['return']['last']['display'], \
data['return']['high']['display'], data['return']['low']['display'], data['return']['buy']['display'], \
data['return']['vol']['display'])
"func": lambda data: u"MtGox // Current: \x0307{}\x0f - High: \x0307{}\x0f - Low: \x0307{}\x0f"
u" - Best Ask: \x0307{}\x0f - Volume: {}".format(data['return']['last']['display'],
data['return']['high']['display'],
data['return']['low']['display'],
data['return']['buy']['display'],
data['return']['vol']['display'])
},
"coinbase":{
"api_url": "https://coinbase.com/api/v1/prices/spot_rate",
@ -24,8 +27,11 @@ exchanges = {
},
"bitstamp": {
"api_url": "https://www.bitstamp.net/api/ticker/",
"func": lambda data: u"BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f - Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']), float(data['high']), float(data['low']), \
float(data['volume']))
"func": lambda data: u"BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f -"
u" Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']),
float(data['high']),
float(data['low']),
float(data['volume']))
}
}
@ -35,7 +41,8 @@ exchanges = {
@hook.command("btc", autohelp=False)
@hook.command(autohelp=False)
def bitcoin(inp):
"""bitcoin <exchange> -- Gets current exchange rate for bitcoins from several exchanges, default is Blockchain. Supports MtGox, Bitpay, Coinbase and BitStamp."""
"""bitcoin <exchange> -- Gets current exchange rate for bitcoins from several exchanges, default is Blockchain.
Supports MtGox, Bitpay, Coinbase and BitStamp."""
inp = inp.lower()
if inp:
@ -58,4 +65,5 @@ def litecoin(inp, message=None):
data = http.get_json("https://btc-e.com/api/2/ltc_usd/ticker")
ticker = data['ticker']
message("Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f"
" - Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} LTC".format(ticker['buy'], ticker['high'], ticker['low'], ticker['vol_cur']))
" - Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} LTC".format(ticker['buy'], ticker['high'], ticker['low'],
ticker['vol_cur']))
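
Each entry in the exchanges table above pairs an api_url with a formatting func. A hedged sketch of how such an entry is presumably consumed (the plugin itself goes through util.http and adds error handling):

import json
import urllib2  # Python 2, matching the rest of the codebase

def fetch_rate(exchange):
    # fetch the ticker JSON and hand it to the exchange's formatting lambda
    data = json.load(urllib2.urlopen(exchange["api_url"]))
    return exchange["func"](data)

# e.g. fetch_rate(exchanges["blockchain"]) -> "Blockchain // Buy: ... - Sell: ..."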

View file

@ -1,4 +1,5 @@
import base64
from util import hook

View file

@ -15,7 +15,7 @@ sign_re = re.compile(r'[+-]?(?:\d*d)?(?:\d+|F)', re.I)
split_re = re.compile(r'([\d+-]*)d?(F|\d*)', re.I)
def nrolls(count, n):
def n_rolls(count, n):
"""roll an n-sided die count times"""
if n == "F":
return [random.randint(-1, 1) for x in xrange(min(count, 100))]
@ -37,7 +37,7 @@ def nrolls(count, n):
#@hook.regex(valid_diceroll, re.I)
@hook.command
def dice(inp):
"""dice <diceroll> -- Simulates dicerolls. Example of <diceroll>:
"""dice <dice roll> -- Simulates dice rolls. Example of <dice roll>:
'dice 2d20-d5+4 roll 2'. D20s, subtract 1D5, add 4"""
try: # if inp is a re.match object...
@ -50,7 +50,7 @@ def dice(inp):
spec = whitespace_re.sub('', inp)
if not valid_diceroll_re.match(spec):
return "Invalid diceroll"
return "Invalid dice roll"
groups = sign_re.findall(spec)
total = 0
@ -60,7 +60,7 @@ def dice(inp):
count, side = split_re.match(roll).groups()
count = int(count) if count not in " +-" else 1
if side.upper() == "F": # fudge dice are basically 1d3-2
for fudge in nrolls(count, "F"):
for fudge in n_rolls(count, "F"):
if fudge == 1:
rolls.append("\x033+\x0F")
elif fudge == -1:
@ -74,14 +74,15 @@ def dice(inp):
side = int(side)
try:
if count > 0:
dice = nrolls(count, side)
rolls += map(str, dice)
total += sum(dice)
d = n_rolls(count, side)
rolls += map(str, d)
total += sum(d)
else:
dice = nrolls(-count, side)
rolls += [str(-x) for x in dice]
total -= sum(dice)
d = n_rolls(-count, side)
rolls += [str(-x) for x in d]
total -= sum(d)
except OverflowError:
# I have never seen this happen. If you make this happen, you win a cookie
return "Thanks for overflowing a float, jerk >:["
if desc:
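
For illustration, a minimal reconstruction of the renamed n_rolls helper, based only on the fragment above (the real helper may treat edge cases such as very large rolls differently):

import random

def n_rolls(count, n):
    """roll an n-sided die count times ("F" means Fudge dice: -1, 0 or +1)"""
    if n == "F":
        return [random.randint(-1, 1) for _ in range(min(count, 100))]
    return [random.randint(1, n) for _ in range(min(count, 100))]

# e.g. sum(n_rolls(2, 20)) gives the total of the 2d20 part of 'dice 2d20-d5+4'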

View file

@ -1,5 +1,6 @@
# Plugin by GhettoWizard and Scaevolus
import re
from util import hook
from util import http

View file

@ -9,7 +9,7 @@ def domainr(inp):
except (http.URLError, http.HTTPError) as e:
return "Unable to get data for some reason. Try again later."
if data['query'] == "":
return "An error occurrred: {status} - {message}".format(**data['error'])
return "An error occurred: {status} - {message}".format(**data['error'])
domains = ""
for domain in data['results']:
domains += ("\x034" if domain['availability'] == "taken" else (

View file

@ -1,6 +1,8 @@
from util import hook, http, text
import re
from util import hook, http, text
api_url = "http://encyclopediadramatica.se/api.php?action=opensearch"
ed_url = "http://encyclopediadramatica.se/"

View file

@ -1,6 +1,8 @@
import random
from util import hook, text
color_codes = {
"<r>": "\x02\x0305",
"<g>": "\x02\x0303",
@ -13,7 +15,7 @@ with open("./data/8ball_responses.txt") as f:
@hook.command('8ball')
def eightball(input, action=None):
def eightball(inp, action=None):
"""8ball <question> -- The all knowing magic eight ball,
in electronic form. Ask and it shall be answered!"""

View file

@ -1,13 +1,15 @@
from util import hook
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
import os
import base64
import json
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2
from util import hook
# helper functions to pad and unpad a string to a specified block size
# <http://stackoverflow.com/questions/12524994/encrypt-decrypt-using-pycrypto-aes-256>
BS = AES.block_size
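
The comment above refers to the conventional pad/unpad helpers from the linked Stack Overflow answer; a hedged sketch of what those helpers typically look like (the plugin's own definitions may differ in detail):

from Crypto.Cipher import AES

BS = AES.block_size
# pad to a multiple of the block size with chr(n) repeated n times (PKCS#7 style)
pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS)
# strip the padding by reading the last byte's value
unpad = lambda s: s[:-ord(s[len(s) - 1:])]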

View file

@ -2,7 +2,7 @@ from util import hook, http, web
@hook.command(autohelp=False)
def fact(inp, say=False, nick=False):
def fact(inp):
"""fact -- Gets a random fact from OMGFACTS."""
attempts = 0
@ -20,10 +20,10 @@ def fact(inp, say=False, nick=False):
response = soup.find('a', {'class': 'surprise'})
link = response['href']
fact = ''.join(response.find(text=True))
fact_data = ''.join(response.find(text=True))
if fact:
fact = fact.strip()
if fact_data:
fact_data = fact_data.strip()
break
else:
if attempts > 2:
@ -34,4 +34,4 @@ def fact(inp, say=False, nick=False):
url = web.try_isgd(link)
return "{} - {}".format(fact, url)
return "{} - {}".format(fact_data, url)

View file

@ -1,8 +1,10 @@
# Written by Scaevolus 2010
from util import hook, http, text, pyexec
import string
import re
from util import hook, http, text, pyexec
re_lineends = re.compile(r'[\r\n]*')
# some simple "shortcodes" for formatting purposes

View file

@ -1,6 +1,8 @@
from util import hook, http
from urllib import quote_plus
from util import hook, http
api_url = "http://api.fishbans.com/stats/{}/"
@ -51,6 +53,5 @@ def bancount(inp):
if not out:
return "The user \x02{}\x02 has no bans.".format(user)
else:
# dat string.
return "Bans for \x02{}\x02: ".format(user) + ", ".join(out) + ". More info " \
"at {}".format(user_url)

View file

@ -8,9 +8,9 @@ def refresh_cache():
soup = http.get_soup('http://www.fmylife.com/random/')
for e in soup.find_all('div', {'class': 'post article'}):
id = int(e['id'])
fml_id = int(e['id'])
text = ''.join(e.find('p').find_all(text=True))
fml_cache.append((id, text))
fml_cache.append((fml_id, text))
# do an initial refresh of the cache
refresh_cache()
@ -21,9 +21,9 @@ def fml(inp, reply=None):
"""fml -- Gets a random quote from fmyfife.com."""
# grab the last item in the fml cache and remove it
id, text = fml_cache.pop()
fml_id, text = fml_cache.pop()
# reply with the fml we grabbed
reply('(#{}) {}'.format(id, text))
reply('(#{}) {}'.format(fml_id, text))
# refresh fml cache if its getting empty
if len(fml_cache) < 3:
refresh_cache()

View file

@ -1,7 +1,10 @@
from util import hook
import random
from util import hook
with open("./data/fortunes.txt") as f:
fortunes = [line.strip() for line in f.readlines()
if not line.startswith("//")]

View file

@ -1,10 +1,13 @@
from util import hook, http
import pygeoip
import os.path
import json
import gzip
from StringIO import StringIO
import pygeoip
from util import hook, http
# load region database
with open("./data/geoip_regions.json", "rb") as f:

View file

@ -1,7 +1,9 @@
from util import hook, http
import json
import urllib2
from util import hook, http
shortcuts = {"cloudbot": "ClouDev/CloudBot"}

View file

@ -1,4 +1,5 @@
import random
from util import hook, http, text

View file

@ -8,6 +8,7 @@ import re
from util import hook, http
max_length = 100

View file

@ -1,9 +1,10 @@
import re
from util import hook
@hook.command(autohelp=False)
def help(inp, notice=None, input=None, conn=None, bot=None):
def help(inp, notice=None, conn=None, bot=None):
"""help -- Gives a list of commands/help for a command."""
funcs = {}

View file

@ -1,7 +1,9 @@
from util import hook, http, timeformat
from urllib import urlencode
import re
from util import hook, http, timeformat
hulu_re = (r'(.*://)(www.hulu.com|hulu.com)(.*)', re.I)
@ -10,7 +12,7 @@ def hulu_url(match):
data = http.get_json("http://www.hulu.com/api/oembed.json?url=http://www.hulu.com" + match.group(3))
showname = data['title'].split("(")[-1].split(")")[0]
title = data['title'].split(" (")[0]
return "{}: {} - {}".format(showname, title, timeformat.timeformat(int(data['duration'])))
return "{}: {} - {}".format(showname, title, timeformat.format_time(int(data['duration'])))
@hook.command('hulu')
@ -21,7 +23,7 @@ def hulu_search(inp):
data = result.find('results').find('videos').find('video')
showname = data.find('show').find('name').text
title = data.find('title').text
duration = timeformat.timeformat(int(float(data.find('duration').text)))
duration = timeformat.format_time(int(float(data.find('duration').text)))
description = data.find('description').text
rating = data.find('content-rating').text
return "{}: {} - {} - {} ({}) {}".format(showname, title, description, duration, rating,

View file

@ -1,7 +1,8 @@
import json
from util import hook
from fnmatch import fnmatch
from util import hook
#@hook.sieve
def ignore_sieve(bot, input, func, type, args):

View file

@ -1,8 +1,10 @@
# IMDb lookup plugin by Ghetto Wizard (2011) and blha303 (2013)
from util import hook, http, text
import re
from util import hook, http, text
id_re = re.compile("tt\d+")
imdb_re = (r'(.*:)//(imdb.com|www.imdb.com)(:[0-9]+)?(.*)', re.I)

View file

@ -1,7 +1,8 @@
from util import hook, http, web
import re
import random
from util import hook, http, web
base_url = "http://reddit.com/r/{}/.json"
imgur_re = re.compile(r'http://(?:i\.)?imgur\.com/(a/)?(\w+\b(?!/))\.?\w?')
@ -18,7 +19,7 @@ def is_valid(data):
@hook.command(autohelp=False)
def imgur(inp):
"imgur [subreddit] -- Gets the first page of imgur images from [subreddit] and returns a link to them. If [subreddit] is undefined, return any imgur images"
"""imgur [subreddit] -- Gets the first page of imgur images from [subreddit] and returns a link to them. If [subreddit] is undefined, return any imgur images"""
if inp:
# see if the input ends with "nsfw"
show_nsfw = inp.endswith(" nsfw")

View file

@ -5,7 +5,7 @@ from util import hook, http, urlnorm
@hook.command
def isup(inp):
"isup -- uses isup.me to see if a site is up or not"
"""isup -- uses isup.me to see if a site is up or not"""
# slightly overcomplicated, esoteric URL parsing
scheme, auth, path, query, fragment = urlparse.urlsplit(inp.strip())

View file

@ -1,6 +1,7 @@
from util import hook, http
import re
from util import hook, http
@hook.command(autohelp=False)
def kernel(inp, reply=None):

View file

@ -1,6 +1,7 @@
from util import hook, textgen
import json
from util import hook, textgen
def get_generator(_json, variables):
data = json.loads(_json)

View file

@ -1,6 +1,8 @@
from util import hook, http, timesince
from datetime import datetime
from util import hook, http, timesince
api_url = "http://ws.audioscrobbler.com/2.0/?format=json"

View file

@ -12,12 +12,8 @@ def metacritic(inp):
"""mc [all|movie|tv|album|x360|ps3|pc|gba|ds|3ds|wii|vita|wiiu|xone|ps4] <title>
Gets rating for <title> from metacritic on the specified medium."""
# if the results suck, it's metacritic's fault
args = inp.strip()
game_platforms = ('x360', 'ps3', 'pc', 'gba', 'ds', '3ds', 'wii',
'vita', 'wiiu', 'xone', 'ps4')
@ -45,38 +41,7 @@ def metacritic(inp):
except HTTPError:
return 'error fetching results'
''' result format:
-- game result, with score
-- subsequent results are the same structure, without first_result class
<li class="result first_result">
<div class="result_type">
<strong>Game</strong>
<span class="platform">WII</span>
</div>
<div class="result_wrap">
<div class="basic_stats has_score">
<div class="main_stats">
<h3 class="product_title basic_stat">...</h3>
<div class="std_score">
<div class="score_wrap">
<span class="label">Metascore: </span>
<span class="data metascore score_favorable">87</span>
</div>
</div>
</div>
<div class="more_stats extended_stats">...</div>
</div>
</div>
</li>
-- other platforms are the same basic layout
-- if it doesn't have a score, there is no div.basic_score
-- the <div class="result_type"> changes content for non-games:
<div class="result_type"><strong>Movie</strong></div>
'''
# get the proper result element we want to pull data from
result = None
if not doc.find_class('query_results'):

View file

@ -1,7 +1,9 @@
from util import hook, http, web, text
import time
import random
from util import hook, http, web, text
## CONSTANTS
base_url = "http://api.bukget.org/3/"
@ -13,7 +15,7 @@ details_url = base_url + "plugins/bukkit/{}"
categories = http.get_json("http://api.bukget.org/3/categories")
count_total = sum([cat["count"] for cat in categories])
count_categores = {cat["name"].lower() : int(cat["count"]) for cat in categories} # dict conps!
count_categories = {cat["name"].lower(): int(cat["count"]) for cat in categories} # dict comps!
class BukgetError(Exception):
@ -88,7 +90,7 @@ def format_output(data):
current_version = data['versions'][0]
last_update = time.strftime('%d %B %Y %H:%M',
time.gmtime(current_version['date']))
time.gmtime(current_version['date']))
version_number = data['versions'][0]['version']
bukkit_versions = ", ".join(current_version['game_versions'])
@ -99,7 +101,8 @@ def format_output(data):
else:
line_a = u"\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(name, authors, stage, url)
line_b = u"Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(version_number, bukkit_versions, last_update, link)
line_b = u"Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(version_number, bukkit_versions,
last_update, link)
return line_a, line_b

View file

@ -1,9 +1,11 @@
""" plugin by _303 (?)
"""
from util import hook
import re
from util import hook
pattern = re.compile(r'^(?P<count>\d+)x (?P<name>.+?): (?P<ingredients>.*)$')
recipelist = []
@ -42,29 +44,29 @@ with open("./data/itemids.txt") as f:
if line.startswith("//"):
continue
parts = line.strip().split()
id = parts[0]
itemid = parts[0]
name = " ".join(parts[1:])
ids.append((id, name))
ids.append((itemid, name))
@hook.command("mcid")
@hook.command
def mcitem(input, reply=None):
def mcitem(inp, reply=None):
"""mcitem <item/id> -- gets the id from an item or vice versa"""
input = input.lower().strip()
inp = inp.lower().strip()
if input == "":
if inp == "":
reply("error: no input.")
return
results = []
for id, name in ids:
if input == id:
results = ["\x02[{}]\x02 {}".format(id, name)]
for item_id, item_name in ids:
if inp == item_id:
results = ["\x02[{}]\x02 {}".format(item_id, item_name)]
break
elif input in name.lower():
results.append("\x02[{}]\x02 {}".format(id, name))
elif inp in item_name.lower():
results.append("\x02[{}]\x02 {}".format(item_id, item_name))
if not results:
return "No matches found."
@ -80,12 +82,12 @@ def mcitem(input, reply=None):
@hook.command("mccraft")
@hook.command
def mcrecipe(input, reply=None):
def mcrecipe(inp, reply=None):
"""mcrecipe <item> -- gets the crafting recipe for an item"""
input = input.lower().strip()
inp = inp.lower().strip()
results = [recipe.line for recipe in recipelist
if input in recipe.output]
if inp in recipe.output]
if not results:
return "No matches found."

View file

@ -1,8 +1,11 @@
from util import hook
# TODO: Rewrite this whole mess
import socket
import struct
import json
from util import hook
try:
import DNS
# Please remember to install the dependancy 'pydns'
@ -106,7 +109,7 @@ def mcping_legacy(host, port):
def get_srv_data(domain):
""" takes a domain and finds minecraft SRV records """
DNS.ParseResolvConf()
DNS.DiscoverNameServers()
srv_req = DNS.Request(qtype='srv')
srv_result = srv_req.req('_minecraft._tcp.{}'.format(domain))
@ -137,10 +140,10 @@ def parse_input(inp):
@hook.command("mcp6")
def mcping6(inp):
"""mcping6 <server>[:port] - Ping a Minecraft server version 1.6 or smaller to check status."""
try:
host, port = parse_input(inp)
except Exception as ex:
return ex.args[0]
#try:
host, port = parse_input(inp)
#except Exception as ex:
# return ex.args[0]
try:
return mcping_legacy(host, port)
except:
@ -165,10 +168,12 @@ def mcping7(inp):
@hook.command("mcp")
def mcping(inp):
"""mcping <server>[:port] - Ping a Minecraft server to check status."""
try:
host, port = parse_input(inp)
except Exception as e:
return e.args[0]
# try:
host, port = parse_input(inp)
#except Exception as e:
# return e.args[0]
#
try:
return mcping_modern(host, port)

View file

@ -1,6 +1,7 @@
from util import hook, http
import json
from util import hook, http
@hook.command(autohelp=False)
def mcstatus(inp):

View file

@ -1,6 +1,8 @@
from util import hook, http, text
import re
from util import hook, http, text
api_url = "http://minecraft.gamepedia.com/api.php?action=opensearch"
mc_url = "http://minecraft.gamepedia.com/"
@ -21,7 +23,7 @@ def mcwiki(inp):
return "No results found."
# we remove items with a '/' in the name, because
# gamepedia uses subpages for different languages
# gamepedia uses sub-pages for different languages
# for some stupid reason
items = [item for item in j[1] if not "/" in item]

View file

@ -1,6 +1,7 @@
# Plugin by Infinity - <https://github.com/infinitylabs/UguuBot>
import random
from util import hook, http
@ -23,11 +24,11 @@ refresh_cache()
@hook.command(autohelp=False)
def mlia(inp, reply=None):
"mlia -- Gets a random quote from MyLifeIsAverage.com."
"""mlia -- Gets a random quote from MyLifeIsAverage.com."""
# grab the last item in the mlia cache and remove it
id, text = mlia_cache.pop()
mlia_id, text = mlia_cache.pop()
# reply with the mlia we grabbed
reply('({}) {}'.format(id, text))
reply('({}) {}'.format(mlia_id, text))
# refresh mlia cache if its getting empty
if len(mlia_cache) < 3:
refresh_cache()

View file

@ -1,8 +1,8 @@
# Plugin by Lukeroge
from util import hook, text, textgen
import json
import os
from util import hook, text, textgen
GEN_DIR = "./plugins/data/name_files/"
@ -10,13 +10,13 @@ GEN_DIR = "./plugins/data/name_files/"
def get_generator(_json):
data = json.loads(_json)
return textgen.TextGenerator(data["templates"],
data["parts"], default_templates=data["default_templates"])
data["parts"], default_templates=data["default_templates"])
@hook.command(autohelp=False)
def namegen(inp, notice=None):
"namegen [generator] -- Generates some names using the chosen generator. " \
"'namegen list' will display a list of all generators."
"""namegen [generator] -- Generates some names using the chosen generator.
'namegen list' will display a list of all generators."""
# clean up the input
inp = inp.strip().lower()

View file

@ -1,7 +1,9 @@
from util import hook, http, text, web
import json
import re
from util import hook, http, text, web
## CONSTANTS
ITEM_URL = "http://www.newegg.com/Product/Product.aspx?Item={}"
@ -21,11 +23,11 @@ def format_item(item, show_url=True):
# format the rating nicely if it exists
if not item["ReviewSummary"]["TotalReviews"] == "[]":
rating = "Rated {}/5 ({} ratings)".format(item["ReviewSummary"]["Rating"],
item["ReviewSummary"]["TotalReviews"][1:-1])
item["ReviewSummary"]["TotalReviews"][1:-1])
else:
rating = "No Ratings"
if not item["FinalPrice"] == item["OriginalPrice"]:
if not item["FinalPrice"] == item["OriginalPrice"]:
price = "{FinalPrice}, was {OriginalPrice}".format(**item)
else:
price = item["FinalPrice"]
@ -44,19 +46,19 @@ def format_item(item, show_url=True):
tags.append("\x02Featured\x02")
if item["IsShellShockerItem"]:
tags.append("\x02SHELL SHOCKER®\x02")
tags.append(u"\x02SHELL SHOCKER\u00AE\x02")
# join all the tags together in a comma seperated string ("tag1, tag2, tag3")
# join all the tags together in a comma separated string ("tag1, tag2, tag3")
tag_text = u", ".join(tags)
if show_url:
# create the item URL and shorten it
url = web.try_isgd(ITEM_URL.format(item["NeweggItemNumber"]))
return u"\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating,
tag_text, url)
tag_text, url)
else:
return u"\x02{}\x02 ({}) - {} - {}".format(title, price, rating,
tag_text)
tag_text)
## HOOK FUNCTIONS
@ -80,8 +82,8 @@ def newegg(inp):
# submit the search request
r = http.get_json(
'http://www.ows.newegg.com/Search.egg/Advanced',
post_data = json.dumps(request)
'http://www.ows.newegg.com/Search.egg/Advanced',
post_data=json.dumps(request)
)
# get the first result

View file

@ -1,6 +1,8 @@
import re
from util import hook, http
newgrounds_re = (r'(.*:)//(www.newgrounds.com|newgrounds.com)(:[0-9]+)?(.*)', re.I)
valid = set('0123456789')

View file

@ -1,10 +1,12 @@
from util import hook
import re
from util import hook
db_inited = False
def cleanSQL(sql):
def clean_sql(sql):
return re.sub(r'\s+', " ", sql).strip()
@ -20,7 +22,7 @@ def db_init(db):
""").fetchone()[0] == 1
if not exists:
db.execute(cleanSQL("""
db.execute(clean_sql("""
create virtual table todos using fts4(
user,
text,
@ -91,7 +93,7 @@ def db_search(db, nick, query):
@hook.command("notes")
@hook.command
def note(inp, nick='', chan='', db=None, notice=None, bot=None):
"note(s) <add|del|list|search> args -- Manipulates your list of notes."
"""note(s) <add|del|list|search> args -- Manipulates your list of notes."""
db_init(db)

View file

@ -1,6 +1,8 @@
from util import hook, http, web
from bs4 import BeautifulSoup
from util import hook, http, web
api_url = "http://osrc.dfm.io/{}/stats"
user_url = "http://osrc.dfm.io/{}"

View file

@ -1,8 +1,10 @@
# based on password generation code by TheNoodle
from util import hook
# TODO: Add some kind of pronounceable password generation
# TODO: Improve randomness
import string
import random
from util import hook
@hook.command
def password(inp, notice=None):
@ -39,10 +41,10 @@ def password(inp, notice=None):
if not okay:
okay = okay + list(string.ascii_lowercase)
password = ""
pw = ""
# generates password
for x in range(length):
password = password + random.choice(okay)
pw = pw + random.choice(okay)
notice(password)
notice(pw)
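
A small hypothetical variant of the generation loop above that also addresses the "Improve randomness" TODO by drawing characters from random.SystemRandom; this is a sketch, not the plugin's code:

import random
import string

def generate_password(length=10, okay=string.ascii_lowercase + string.digits):
    # SystemRandom uses OS entropy rather than the default Mersenne Twister
    rng = random.SystemRandom()
    return "".join(rng.choice(okay) for _ in range(length))

# e.g. generate_password(12)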

View file

@ -1,9 +1,11 @@
# ping plugin by neersighted
from util import hook
import subprocess
import re
import os
from util import hook
ping_regex = re.compile(r"(\d+.\d+)/(\d+.\d+)/(\d+.\d+)/(\d+.\d+)")
@ -13,6 +15,7 @@ def ping(inp, reply=None):
if os.name == "nt":
return "Sorry, this command is not supported on Windows systems."
# TODO: Rewrite this entire command to work on Windows, somehow
args = inp.split(' ')
host = args[0]

View file

@ -1,8 +1,10 @@
# coding=utf-8
from util import hook
import re
import random
from util import hook
potatoes = ['AC Belmont', 'AC Blue Pride', 'AC Brador', 'AC Chaleur', 'AC Domino', 'AC Dubuc', 'AC Glacier Chip',
'AC Maple Gold', 'AC Novachip', 'AC Peregrine Red', 'AC Ptarmigan', 'AC Red Island', 'AC Saguenor',
'AC Stampede Russet', 'AC Sunbury', 'Abeille', 'Abnaki', 'Acadia', 'Acadia Russet', 'Accent',
@ -51,4 +53,4 @@ def potato(inp, action=None, input=None):
side_dish = random.choice(['side salad', 'dollop of sour cream', 'piece of chicken', 'bowl of shredded bacon'])
action("{} a {} {} {} potato for {} and serves it with a small {}!".format(method, flavor, size, potato_type, inp,
side_dish))
side_dish))

View file

@ -1,11 +1,12 @@
from util import hook, http, timesince
import datetime
from util import hook, http, timesince
@hook.command("scene")
@hook.command
def pre(inp):
'pre <query> -- searches scene releases using orlydb.com'
"""pre <query> -- searches scene releases using orlydb.com"""
try:
h = http.get_html("http://orlydb.com/", q=inp)

View file

@ -1,49 +0,0 @@
import urllib2
import random
from util import hook
def make_string():
stuff = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
string = random.choice("123")
for x in range(4):
string += random.choice(stuff)
return string
def check_url(code):
try:
urllib2.urlopen(make_url(code))
return True
except:
return False # sorry <3
def make_url(code):
return "http://puu.sh/{}".format(code)
@hook.command(autohelp=False)
def puush(inp):
"""puush [1-5] -- Returns a number of random puu.sh entries."""
out = ""
num = 0
if not inp:
inp = "1"
if not inp.isdigit():
out += "Defaulting to one: "
num = 1
elif int(inp[0]) > 5:
out += "Five images max: "
num = 5
else:
num = int(inp[0])
images = []
for x in xrange(num):
ran = make_string()
while not check_url(ran):
ran = make_string()
images.append(make_url(ran))
return out + " ".join(images)

View file

@ -8,9 +8,9 @@ def qrcode(inp):
"""qrcode [link] returns a link for a QR code."""
args = {
"cht": "qr", # chart type
"cht": "qr", # chart type (QR)
"chs": "200x200", # dimensions
"chl": inp
"chl": inp # data
}
link = http.prepare_url("http://chart.googleapis.com/chart", args)
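
http.prepare_url is assumed here to do roughly the following: attach the chart parameters to the Google Chart endpoint as a query string. A sketch under that assumption:

from urllib import urlencode  # Python 2

args = {"cht": "qr", "chs": "200x200", "chl": "http://example.com"}
link = "http://chart.googleapis.com/chart?" + urlencode(args)
# link now points at a 200x200 QR code encoding the chl value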

View file

@ -1,9 +1,11 @@
import urllib
import json
import re
from util import hook
import oauth2 as oauth
from util import hook
def getdata(inp, types, api_key, api_secret):
consumer = oauth.Consumer(api_key, api_secret)

View file

@ -1,9 +1,11 @@
from util import hook, http, text, timesince
from datetime import datetime
import re
import random
reddit_re = (r'.*((www\.)?reddit\.com/r[^ ]+)', re.I)
from util import hook, http, text, timesince
reddit_re = (r'.*(((www\.)?reddit\.com/r|redd\.it)[^ ]+)', re.I)
base_url = "http://reddit.com/r/{}/.json"
short_url = "http://redd.it/{}"
@ -52,7 +54,7 @@ def reddit(inp):
data = data["data"]["children"]
# get the requested/random post
if id_num != None:
if id_num is not None:
try:
item = data[id_num]["data"]
except IndexError:
@ -64,14 +66,14 @@ def reddit(inp):
item["title"] = text.truncate_str(item["title"], 50)
item["link"] = short_url.format(item["id"])
rawtime = datetime.fromtimestamp(int(item["created_utc"]))
item["timesince"] = timesince.timesince(rawtime)
raw_time = datetime.fromtimestamp(int(item["created_utc"]))
item["timesince"] = timesince.timesince(raw_time)
if item["over_18"]:
item["warning"] = " \x02NSFW\x02"
else:
item["warning"] = ""
return u'\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02' \
' {timesince} ago - {ups} upvotes, {downs} downvotes -' \
' {link}{warning}'.format(**item)
return u"\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
" {timesince} ago - {ups} upvotes, {downs} downvotes -" \
" {link}{warning}".format(**item)

plugins/regex_chans.py (new file, 128 lines)
View file

@ -0,0 +1,128 @@
from util import hook
# Default value.
# If True, all channels without a setting will have regex enabled
# If False, all channels without a setting will have regex disabled
default_enabled = True
db_already_initiated = False
def db_init(db):
global db_already_initiated
if not db_already_initiated:
db_already_initiated = True
db.execute("CREATE TABLE IF NOT EXISTS regexchans(channel PRIMARY KEY, status)")
db.commit()
def get_status(db, channel):
row = db.execute("SELECT status FROM regexchans WHERE channel = ?", [channel]).fetchone()
if row:
return row[0]
else:
return None
def set_status(db, channel, status):
row = db.execute("REPLACE INTO regexchans (channel, status) VALUES(?, ?)", [channel, status])
db.commit()
def delete_status(db, channel):
row = db.execute("DELETE FROM regexchans WHERE channel = ?", [channel])
db.commit()
def list_status(db):
row = db.execute("SELECT * FROM regexchans").fetchall()
result = None
for values in row:
if result:
result += u", {}: {}".format(values[0], values[1])
else:
result = u"{}: {}".format(values[0], values[1])
return result
@hook.sieve
def sieve_regex(bot, inp, func, kind, args):
db = bot.get_db_connection(inp.conn)
db_init(db)
if kind == 'regex' and inp.chan.startswith("#") and func.__name__ != 'factoid':
chanstatus = get_status(db, inp.chan)
if chanstatus != "ENABLED" and (chanstatus == "DISABLED" or not default_enabled):
print u"Denying input.raw={}, kind={}, args={} from {}".format(inp.raw, kind, args, inp.chan)
return None
print u"Allowing input.raw={}, kind={}, args={} from {}".format(inp.raw, kind, args, inp.chan)
return inp
@hook.command(permissions=["botcontrol"])
def enableregex(inp, db=None, message=None, notice=None, chan=None, nick=None):
db_init(db)
inp = inp.strip().lower()
if not inp:
channel = chan
elif inp.startswith("#"):
channel = inp
else:
channel = u"#{}".format(inp)
message(u"Enabling regex matching (youtube, etc) (issued by {})".format(nick), target=channel)
notice(u"Enabling regex matching (youtube, etc) in channel {}".format(channel))
set_status(db, channel, "ENABLED")
@hook.command(permissions=["botcontrol"])
def disableregex(inp, db=None, message=None, notice=None, chan=None, nick=None):
db_init(db)
inp = inp.strip().lower()
if not inp:
channel = chan
elif inp.startswith("#"):
channel = inp
else:
channel = u"#{}".format(inp)
message(u"Disabling regex matching (youtube, etc) (issued by {})".format(nick), target=channel)
notice(u"Disabling regex matching (youtube, etc) in channel {}".format(channel))
set_status(db, channel, "DISABLED")
@hook.command(permissions=["botcontrol"])
def resetregex(inp, db=None, message=None, notice=None, chan=None, nick=None):
db_init(db)
inp = inp.strip().lower()
if not inp:
channel = chan
elif inp.startswith("#"):
channel = inp
else:
channel = u"#{}".format(inp)
message(u"Resetting regex matching setting (youtube, etc) (issued by {})".format(nick), target=channel)
notice(u"Resetting regex matching setting (youtube, etc) in channel {}".format(channel))
delete_status(db, channel)
@hook.command(permissions=["botcontrol"])
def regexstatus(inp, db=None, chan=None):
db_init(db)
inp = inp.strip().lower()
if not inp:
channel = chan
elif inp.startswith("#"):
channel = inp
else:
channel = u"#{}".format(inp)
return u"Regex status for {}: {}".format(channel, get_status(db, channel))
@hook.command(permissions=["botcontrol"])
def listregex(inp, db=None):
db_init(db)
return list_status(db)
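
The check in sieve_regex above boils down to: an explicit ENABLED/DISABLED setting for the channel wins, otherwise default_enabled applies. A standalone illustration of that decision (regex_allowed is a hypothetical helper):

default_enabled = True

def regex_allowed(status):
    if status == "ENABLED":
        return True
    if status == "DISABLED":
        return False
    return default_enabled  # no explicit setting stored for this channel

assert regex_allowed("ENABLED") is True
assert regex_allowed("DISABLED") is False
assert regex_allowed(None) is default_enabled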

View file

@ -36,4 +36,4 @@ def rottentomatoes(inp, bot=None):
rotten = review_count - fresh
return u"{} - Critics Rating: \x02{}%\x02 ({} liked, {} disliked) " \
"Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh, rotten, audience_score, url)
"Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh, rotten, audience_score, url)

View file

@ -5,6 +5,7 @@ import re
from util import hook, timesince
db_ready = False

View file

@ -1,6 +1,7 @@
from util import hook, textgen
import json
from util import hook, textgen
def get_generator(_json, variables):
data = json.loads(_json)

View file

@ -1,6 +1,7 @@
from util import hook, text
import random
from util import hook, text
with open("./data/slogans.txt") as f:
slogans = [line.strip() for line in f.readlines()

View file

@ -1,7 +1,9 @@
from util import hook, http, web, text
from urllib import urlencode
import re
from util import hook, http, web, text
sc_re = (r'(.*:)//(www.)?(soundcloud.com)(.*)', re.I)
api_url = "http://api.soundcloud.com"
sndsc_re = (r'(.*:)//(www.)?(snd.sc)(.*)', re.I)

View file

@ -1,8 +1,9 @@
from util import hook
from enchant.checker import SpellChecker
import enchant
from util import hook
locale = "en_US"
@ -15,11 +16,11 @@ def spell(inp):
if len(inp.split(" ")) > 1:
# input is a sentence
chkr = SpellChecker(locale)
chkr.set_text(inp)
checker = SpellChecker(locale)
checker.set_text(inp)
offset = 0
for err in chkr:
for err in checker:
# find the location of the incorrect word
start = err.wordpos + offset
finish = start + len(err.word)

View file

@ -1,7 +1,7 @@
import re
from urllib import urlencode
from util import hook, http, web
from urllib import urlencode
gateway = 'http://open.spotify.com/{}/{}' # http spotify gw address
spuri = 'spotify:{}:{}'

View file

@ -1,7 +1,9 @@
import re
from bs4 import BeautifulSoup, NavigableString, Tag
from util import hook, http, web
from util.text import truncate_str
from bs4 import BeautifulSoup, NavigableString, Tag
steam_re = (r'(.*:)//(store.steampowered.com)(:[0-9]+)?(.*)', re.I)
@ -53,10 +55,10 @@ def get_steam_info(url):
data[title] = text
continue
data["price"] = soup.find('div', {'class': 'game_purchase_price price'}).text.strip()
return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}, \x02Price\x02: {price}".format(**data)
return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
u" \x02Price\x02: {price}".format(**data)
@hook.regex(*steam_re)

View file

@ -1,7 +1,9 @@
from util import hook, http, text
import csv
import StringIO
from util import hook, http, text
gauge_url = "http://www.mysteamgauge.com/search?username={}"
api_url = "http://mysteamgauge.com/user/{}.csv"

View file

@ -9,7 +9,7 @@ def stock(inp):
query = "SELECT * FROM yahoo.finance.quote WHERE symbol=@symbol LIMIT 1"
quote = web.query(query, {"symbol": sym}).one()
# if we dont get a company name back, the symbol doesn't match a company
# if we don't get a company name back, the symbol doesn't match a company
if quote['Change'] is None:
return "Unknown ticker symbol: {}".format(sym)

View file

@ -2,9 +2,10 @@ import os
import re
import time
import platform
from util import hook
from datetime import timedelta
from util import hook
def convert_kilobytes(kilobytes):
if kilobytes >= 1024:

View file

@ -50,7 +50,7 @@ def tellinput(paraml, input=None, notice=None, db=None, bot=None, nick=None, con
@hook.command(autohelp=False)
def showtells(inp, nick='', chan='', notice=None, db=None):
"showtells -- View all pending tell messages (sent in a notice)."
"""showtells -- View all pending tell messages (sent in a notice)."""
db_init(db)

View file

@ -1,7 +1,9 @@
from util import hook, http
import time
from util import hook, http
from util.text import capitalize_first
api_url = 'http://api.wolframalpha.com/v2/query?format=plaintext'
@ -16,10 +18,10 @@ def time_command(inp, bot=None):
return "error: no wolfram alpha api key set"
request = http.get_xml(api_url, input=query, appid=api_key)
time = " ".join(request.xpath("//pod[@title='Result']/subpod/plaintext/text()"))
time = time.replace(" | ", ", ")
current_time = " ".join(request.xpath("//pod[@title='Result']/subpod/plaintext/text()"))
current_time = current_time.replace(" | ", ", ")
if time:
if current_time:
# nice place name for UNIX time
if inp.lower() == "unix":
place = "Unix Epoch"
@ -27,7 +29,7 @@ def time_command(inp, bot=None):
place = capitalize_first(" ".join(request.xpath("//pod[@"
"title='Input interpretation']/subpod/plaintext/text()"))[
16:])
return "{} - \x02{}\x02".format(time, place)
return "{} - \x02{}\x02".format(current_time, place)
else:
return "Could not get the time for '{}'.".format(inp)

View file

@ -1,6 +1,7 @@
from util import hook, http, urlnorm
from bs4 import BeautifulSoup
from util import hook, http, urlnorm
@hook.command
def title(inp):
@ -14,9 +15,9 @@ def title(inp):
except (http.HTTPError, http.URLError):
return "Could not fetch page."
title = soup.find('title').contents[0]
page_title = soup.find('title').contents[0]
if not title:
if not page_title:
return "Could not find title."
return u"{} [{}]".format(title, real_url)
return u"{} [{}]".format(page_title, real_url)

View file

@ -9,11 +9,11 @@ from zipfile import ZipFile
from cStringIO import StringIO
from lxml import etree
from util import hook, http
base_url = "http://thetvdb.com/api/"
api_key = "469B73127CA0C411"
def get_zipped_xml(*args, **kwargs):
@ -25,11 +25,11 @@ def get_zipped_xml(*args, **kwargs):
return etree.parse(ZipFile(zip_buffer, "r").open(path))
def get_episodes_for_series(seriesname, api_key):
def get_episodes_for_series(series_name, api_key):
res = {"error": None, "ended": False, "episodes": None, "name": None}
# http://thetvdb.com/wiki/index.php/API:GetSeries
try:
query = http.get_xml(base_url + 'GetSeries.php', seriesname=seriesname)
query = http.get_xml(base_url + 'GetSeries.php', seriesname=series_name)
except URLError:
res["error"] = "error contacting thetvdb.com"
return res
@ -63,7 +63,7 @@ def get_episode_info(episode, api_key):
first_aired = episode.findtext("FirstAired")
try:
airdate = datetime.date(*map(int, first_aired.split('-')))
air_date = datetime.date(*map(int, first_aired.split('-')))
except (ValueError, TypeError):
return None
@ -79,7 +79,7 @@ def get_episode_info(episode, api_key):
episode_desc = '{}'.format(episode_num)
if episode_name:
episode_desc += ' - {}'.format(episode_name)
return first_aired, airdate, episode_desc
return first_aired, air_date, episode_desc
@hook.command
@ -111,15 +111,15 @@ def tv_next(inp, bot=None):
if ep_info is None:
continue
(first_aired, airdate, episode_desc) = ep_info
(first_aired, air_date, episode_desc) = ep_info
if airdate > today:
if air_date > today:
next_eps = ['{} ({})'.format(first_aired, episode_desc)]
elif airdate == today:
elif air_date == today:
next_eps = ['Today ({})'.format(episode_desc)] + next_eps
else:
#we're iterating in reverse order with newest episodes last
#so, as soon as we're past today, break out of loop
# we're iterating in reverse order with newest episodes last
# so, as soon as we're past today, break out of loop
break
if not next_eps:
@ -158,9 +158,9 @@ def tv_last(inp, bot=None):
if ep_info is None:
continue
(first_aired, airdate, episode_desc) = ep_info
(first_aired, air_date, episode_desc) = ep_info
if airdate < today:
if air_date < today:
#iterating in reverse order, so the first episode encountered
#before today was the most recently aired
prev_ep = '{} ({})'.format(first_aired, episode_desc)

View file

@ -1,7 +1,9 @@
import re
from util import hook, http
from HTMLParser import HTMLParser
from util import hook, http
twitch_re = (r'(.*:)//(twitch.tv|www.twitch.tv)(:[0-9]+)?(.*)', re.I)
multitwitch_re = (r'(.*:)//(www.multitwitch.tv|multitwitch.tv)/(.*)', re.I)

View file

@ -1,9 +1,12 @@
from util import hook, timesince
import tweepy
import re
import random
from datetime import datetime
import tweepy
from util import hook, timesince
TWITTER_RE = (r"(?:(?:www.twitter.com|twitter.com)/(?:[-_a-zA-Z0-9]+)/status/)([0-9]+)", re.I)
@ -26,7 +29,7 @@ def get_api(bot):
@hook.regex(*TWITTER_RE)
def twitter_url(match, bot=None):
tweet_id = match.group(1)
api = get_api(bot)
if not api:
return
@ -46,13 +49,13 @@ def twitter_url(match, bot=None):
time = timesince.timesince(tweet.created_at, datetime.utcnow())
return u"{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
@hook.command("tw")
@hook.command("twatter")
@hook.command
def twitter(inp, bot=None):
"twitter <user> [n] -- Gets last/[n]th tweet from <user>"
"""twitter <user> [n] -- Gets last/[n]th tweet from <user>"""
api = get_api(bot)
if not api:
@ -117,6 +120,9 @@ def twitter(inp, bot=None):
tweet = random.choice(search)
user = tweet.user
else:
# ???
return "Unknown Error"
text = " ".join(tweet.text.split())

View file

@ -1,6 +1,8 @@
from util import hook, http, text
import re
from util import hook, http, text
base_url = 'http://www.urbandictionary.com/iphone/search/define'
@ -10,37 +12,36 @@ def urban(inp):
"""urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""
# clean and split the input
input = inp.lower().strip()
parts = input.split()
inp = inp.lower().strip()
parts = inp.split()
# if the last word is a number, set the ID to that number
if parts[-1].isdigit():
id = int(parts[-1])
id_num = int(parts[-1])
# remove the ID from the input string
del parts[-1]
input = " ".join(parts)
inp = " ".join(parts)
else:
id = 1
id_num = 1
# fetch the definitions
page = http.get_json(base_url, term=input, referer="http://m.urbandictionary.com")
defs = page['list']
print page
page = http.get_json(base_url, term=inp, referer="http://m.urbandictionary.com")
definitions = page['list']
if page['result_type'] == 'no_results':
return 'Not found.'
# try getting the requested definition
try:
definition = defs[id - 1]['definition'].replace('\r\n', ' ')
definition = definitions[id_num - 1]['definition'].replace('\r\n', ' ')
definition = re.sub('\s+', ' ', definition).strip() # remove excess spaces
definition = text.truncate_str(definition, 200)
except IndexError:
return 'Not found.'
url = defs[id - 1]['permalink']
url = definitions[id_num - 1]['permalink']
output = u"[%i/%i] %s :: %s" % \
(id, len(defs), definition, url)
(id_num, len(definitions), definition, url)
return output
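
The trailing-number convention used above (a final digit selects which definition to show, defaulting to the first) can be isolated into a small helper for illustration; parse_query is hypothetical, not part of the plugin:

def parse_query(inp):
    parts = inp.lower().strip().split()
    if parts and parts[-1].isdigit():
        return " ".join(parts[:-1]), int(parts[-1])
    return " ".join(parts), 1

# parse_query("hello world 3") -> ('hello world', 3)
# parse_query("hello world")   -> ('hello world', 1)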

View file

@ -1,13 +1,15 @@
from util import hook, text
import hashlib
import collections
import re
from util import hook, text
# variables
colors = collections.OrderedDict([
('red', '\x0304'),
('ornage', '\x0307'),
('orange', '\x0307'),
('yellow', '\x0308'),
('green', '\x0309'),
('cyan', '\x0303'),
@ -24,8 +26,8 @@ colors = collections.OrderedDict([
strip_re = re.compile("(\x03|\x02|\x1f)(?:,?\d{1,2}(?:,\d{1,2})?)?", re.UNICODE)
def strip(text):
return strip_re.sub('', text)
def strip(string):
return strip_re.sub('', string)
# basic text tools
@ -89,7 +91,7 @@ def checkbase64(inp):
recoded = decoded.encode('base64').strip()
is_base64 = recoded == inp
except:
is_base64 = False
return '"{}" is not base64 encoded'.format(inp)
if is_base64:
return '"{}" is base64 encoded'.format(recoded)

View file

@ -20,7 +20,7 @@ def validate(inp):
status = info['x-w3c-validator-status'].lower()
if status in ("valid", "invalid"):
errorcount = info['x-w3c-validator-errors']
warningcount = info['x-w3c-validator-warnings']
error_count = info['x-w3c-validator-errors']
warning_count = info['x-w3c-validator-warnings']
return "{} was found to be {} with {} errors and {} warnings." \
" see: {}".format(inp, status, errorcount, warningcount, url)
" see: {}".format(inp, status, error_count, warning_count, url)

View file

@ -1,7 +1,8 @@
from util import hook, http, web
import json
import urllib2
from util import hook, http, web
def get_sound_info(game, search):
search = search.replace(" ", "+")

View file

@ -8,7 +8,7 @@ def vimeo_url(match):
% match.group(1))
if info:
info[0]["duration"] = timeformat.timeformat(info[0]["duration"])
info[0]["duration"] = timeformat.format_time(info[0]["duration"])
info[0]["stats_number_of_likes"] = format(
info[0]["stats_number_of_likes"], ",d")
info[0]["stats_number_of_plays"] = format(

View file

@ -13,7 +13,7 @@ def weather(inp, reply=None, db=None, nick=None, bot=None, notice=None):
if not api_key:
return "Error: No wunderground API details."
# initalise weather DB
# initialise weather DB
db.execute("create table if not exists weather(nick primary key, loc)")
# if there is no input, try getting the users last location from the DB

View file

@ -9,7 +9,6 @@ from util import hook, http, text, web
@hook.command
def wolframalpha(inp, bot=None):
"""wa <query> -- Computes <query> using Wolfram Alpha."""
api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
if not api_key:

View file

@ -11,6 +11,6 @@ def answer(inp):
short_url = web.try_isgd(result["Link"])
# we split the answer and .join() it to remove newlines/extra spaces
answer = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
answer_text = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer, short_url)
return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url)

View file

@ -14,10 +14,7 @@ video_url = "http://youtu.be/%s"
def plural(num=0, text=''):
return "{:,} {}{}".format(num, text, "s"[num==1:])
return "{:,} {}{}".format(num, text, "s"[num == 1:])
def get_video_description(video_id):
@ -41,22 +38,23 @@ def get_video_description(video_id):
likes = plural(int(data['likeCount']), "like")
dislikes = plural(data['ratingCount'] - int(data['likeCount']), "dislike")
percent = 100 * float(data['likeCount'])/float(data['ratingCount'])
percent = 100 * float(data['likeCount']) / float(data['ratingCount'])
out += u' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
dislikes, percent)
dislikes, percent)
if 'viewCount' in data:
views = data['viewCount']
out += u' - \x02{:,}\x02 view{}'.format(views, "s"[views==1:])
out += u' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
try:
uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"]["$t"]
uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"][
"$t"]
except:
uploader = data["uploader"]
upload_time = time.strptime(data['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z")
out += u' - \x02{}\x02 on \x02{}\x02'.format(uploader,
time.strftime("%Y.%m.%d", upload_time))
time.strftime("%Y.%m.%d", upload_time))
if 'contentRating' in data:
out += u' - \x034NSFW\x02'
@ -88,7 +86,6 @@ def youtube(inp):
return get_video_description(video_id) + u" - " + video_url % video_id
@hook.command('ytime')
@hook.command
def youtime(inp):
@ -119,8 +116,8 @@ def youtime(inp):
total_text = timeformat.format_time(total, accuracy=8)
return u'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
'a total run time of {}!'.format(data['title'], length_text, views, \
total_text)
u'a total run time of {}!'.format(data['title'], length_text, views,
total_text)
ytpl_re = (r'(.*:)//(www.youtube.com/playlist|youtube.com/playlist)(:[0-9]+)?(.*)', re.I)
@ -135,6 +132,6 @@ def ytplaylist_url(match):
return "\x034\x02Invalid response."
title = soup.find('title').text.split('-')[0].strip()
author = soup.find('img', {'class': 'channel-header-profile-image'})['title']
numvideos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
num_videos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
views = soup.find('ul', {'class': 'header-stats'}).findAll('li')[1].text.split(' ')[0]
return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, numvideos, author)
return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, num_videos, author)

View file

@ -1,3 +1,4 @@
Crypto
BeautifulSoup==3.2.1
lxml==3.1beta1
pyenchant==1.6.5