Merge branch 'develop' into refresh

Conflicts:
	disabled_stuff/mygengo_translate.py
	plugins/attacks.py
	plugins/core_sieve.py
	plugins/fortune.py
	plugins/geoip.py
	plugins/mark.py
commit bf9468a4aa
Luke Rogers, 2014-02-14 16:41:51 +13:00
82 changed files with 530 additions and 380 deletions

@@ -1,79 +1,79 @@
 {
-    "templates":[
+    "templates": [
         "rips off {user}'s {limbs} and leaves them to die.",
         "grabs {user}'s head and rips it clean off their body.",
         "grabs a {gun} and riddles {user}'s body with bullets.",
         "gags and ties {user} then throws them off a {tall_thing}.",
         "crushes {user} with a huge spiked {spiked_thing}.",
         "glares at {user} until they die of boredom.",
         "stabs {user} in the heart a few times with a {weapon_stab}.",
         "rams a {weapon_explosive} up {user}'s ass and lets off a few rounds.",
         "crushes {user}'s skull in with a {weapon_crush}.",
         "unleashes the armies of Isengard on {user}.",
         "gags and ties {user} then throws them off a {tall_thing} to their death.",
         "reaches out and punches right through {user}'s chest.",
         "slices {user}'s limbs off with a {weapon_slice}.",
         "throws {user} to Cthulu and watches them get ripped to shreds.",
         "feeds {user} to an owlbear who then proceeds to maul them violently.",
         "turns {user} into a snail and covers then in salt.",
         "snacks on {user}'s dismembered body.",
         "stuffs {bomb} up {user}'s ass and waits for it to go off.",
         "puts {user} into a sack, throws the sack in the river, and hurls the river into space.",
         "goes bowling with {user}'s bloody disembodied head.",
         "sends {user} to /dev/null!",
         "feeds {user} coke and mentos till they violently explode."
     ],
     "parts": {
-        "gun":[
+        "gun": [
             "AK47",
             "machine gun",
             "automatic pistol",
             "Uzi"
         ],
         "limbs": [
             "legs",
             "arms",
             "limbs"
         ],
         "weapon_stab": [
             "knife",
             "shard of glass",
             "sword blade",
             "butchers knife",
             "corkscrew"
         ],
         "weapon_slice": [
             "sharpened katana",
             "chainsaw",
             "polished axe"
         ],
         "weapon_crush": [
             "spiked mace",
             "baseball bat",
             "wooden club",
             "massive steel ball",
             "heavy iron rod"
         ],
         "weapon_explosive": [
             "rocket launcher",
             "grenade launcher",
             "napalm launcher"
         ],
         "tall_thing": [
             "bridge",
             "tall building",
             "cliff",
             "mountain"
         ],
         "spiked_thing": [
             "boulder",
             "rock",
             "barrel of rocks"
         ],
         "bomb": [
             "a bomb",
             "some TNT",
             "a bunch of C4"
         ]
     }
 }

@@ -1,4 +1,3 @@
-from util import hook
 import os
 import sys
 import re
@@ -6,6 +5,8 @@ import json
 import time
 import subprocess
+from util import hook
 @hook.command(autohelp=False, permissions=["permissions_users"])
 def permissions(inp, bot=None, notice=None):

@@ -1,6 +1,8 @@
-from util import hook
 import random
+from util import hook
 with open("data/larts.txt") as f:
     larts = [line.strip() for line in f.readlines()
              if not line.startswith("//")]

@@ -1,5 +1,5 @@
-'''brainfuck interpreter adapted from (public domain) code at
-http://brainfuck.sourceforge.net/brain.py'''
+"""brainfuck interpreter adapted from (public domain) code at
+http://brainfuck.sourceforge.net/brain.py"""
 import re
 import random

@@ -1,6 +1,7 @@
-from util import hook
 import random
+from util import hook
 @hook.command(autohelp=False)
 def coin(inp, action=None):

@@ -1,7 +1,9 @@
 import time
 from util import hook
 # CTCP responses
 @hook.regex(r'^\x01VERSION\x01$')
 def ctcp_version(inp, notice=None):

@@ -4,6 +4,7 @@ import re
 from util import hook
 socket.setdefaulttimeout(10)
 nick_re = re.compile(":(.+?)!")

@@ -4,6 +4,7 @@ TOKENS = 10
 RESTORE_RATE = 2
 MESSAGE_COST = 5
 buckets = {}
 @hook.sieve

@@ -5,14 +5,17 @@ from util import http, hook
 exchanges = {
     "blockchain": {
         "api_url": "https://blockchain.info/ticker",
-        "func": lambda data: u"Blockchain // Buy: \x0307${:,.2f}\x0f - Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], \
-                             data["USD"]["sell"])
+        "func": lambda data: u"Blockchain // Buy: \x0307${:,.2f}\x0f -"
+                             u" Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], data["USD"]["sell"])
     },
     "mtgox": {
         "api_url": "https://mtgox.com/api/1/BTCUSD/ticker",
-        "func": lambda data: u"MtGox // Current: \x0307{}\x0f - High: \x0307{}\x0f - Low: \x0307{}\x0f - Best Ask: \x0307{}\x0f - Volume: {}".format(data['return']['last']['display'], \
-                             data['return']['high']['display'], data['return']['low']['display'], data['return']['buy']['display'], \
-                             data['return']['vol']['display'])
+        "func": lambda data: u"MtGox // Current: \x0307{}\x0f - High: \x0307{}\x0f - Low: \x0307{}\x0f"
+                             u" - Best Ask: \x0307{}\x0f - Volume: {}".format(data['return']['last']['display'],
+                                                                              data['return']['high']['display'],
+                                                                              data['return']['low']['display'],
+                                                                              data['return']['buy']['display'],
+                                                                              data['return']['vol']['display'])
     },
     "coinbase":{
         "api_url": "https://coinbase.com/api/v1/prices/spot_rate",
@@ -24,8 +27,11 @@ exchanges = {
     },
     "bitstamp": {
         "api_url": "https://www.bitstamp.net/api/ticker/",
-        "func": lambda data: u"BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f - Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']), float(data['high']), float(data['low']), \
-                             float(data['volume']))
+        "func": lambda data: u"BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f -"
+                             u" Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']),
+                                                                                      float(data['high']),
+                                                                                      float(data['low']),
+                                                                                      float(data['volume']))
     }
 }
@@ -35,7 +41,8 @@ exchanges = {
 @hook.command("btc", autohelp=False)
 @hook.command(autohelp=False)
 def bitcoin(inp):
-    """bitcoin <exchange> -- Gets current exchange rate for bitcoins from several exchanges, default is Blockchain. Supports MtGox, Bitpay, Coinbase and BitStamp."""
+    """bitcoin <exchange> -- Gets current exchange rate for bitcoins from several exchanges, default is Blockchain.
+    Supports MtGox, Bitpay, Coinbase and BitStamp."""
     inp = inp.lower()
     if inp:
@@ -58,4 +65,5 @@ def litecoin(inp, message=None):
     data = http.get_json("https://btc-e.com/api/2/ltc_usd/ticker")
     ticker = data['ticker']
     message("Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f"
-            " - Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} LTC".format(ticker['buy'], ticker['high'], ticker['low'], ticker['vol_cur']))
+            " - Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} LTC".format(ticker['buy'], ticker['high'], ticker['low'],
+                                                                      ticker['vol_cur']))

@@ -1,4 +1,5 @@
 import base64
 from util import hook

@@ -15,7 +15,7 @@ sign_re = re.compile(r'[+-]?(?:\d*d)?(?:\d+|F)', re.I)
 split_re = re.compile(r'([\d+-]*)d?(F|\d*)', re.I)
-def nrolls(count, n):
+def n_rolls(count, n):
     """roll an n-sided die count times"""
     if n == "F":
         return [random.randint(-1, 1) for x in xrange(min(count, 100))]
@@ -37,7 +37,7 @@ def nrolls(count, n):
 #@hook.regex(valid_diceroll, re.I)
 @hook.command
 def dice(inp):
-    """dice <diceroll> -- Simulates dicerolls. Example of <diceroll>:
+    """dice <dice roll> -- Simulates dice rolls. Example of <dice roll>:
     'dice 2d20-d5+4 roll 2'. D20s, subtract 1D5, add 4"""
     try: # if inp is a re.match object...
@@ -50,7 +50,7 @@ def dice(inp):
     spec = whitespace_re.sub('', inp)
     if not valid_diceroll_re.match(spec):
-        return "Invalid diceroll"
+        return "Invalid dice roll"
     groups = sign_re.findall(spec)
     total = 0
@@ -60,7 +60,7 @@ def dice(inp):
         count, side = split_re.match(roll).groups()
         count = int(count) if count not in " +-" else 1
         if side.upper() == "F": # fudge dice are basically 1d3-2
-            for fudge in nrolls(count, "F"):
+            for fudge in n_rolls(count, "F"):
                 if fudge == 1:
                     rolls.append("\x033+\x0F")
                 elif fudge == -1:
@@ -74,14 +74,15 @@ def dice(inp):
            side = int(side)
        try:
            if count > 0:
-                dice = nrolls(count, side)
-                rolls += map(str, dice)
-                total += sum(dice)
+                d = n_rolls(count, side)
+                rolls += map(str, d)
+                total += sum(d)
            else:
-                dice = nrolls(-count, side)
-                rolls += [str(-x) for x in dice]
-                total -= sum(dice)
+                d = n_rolls(-count, side)
+                rolls += [str(-x) for x in d]
+                total -= sum(d)
        except OverflowError:
+            # I have never seen this happen. If you make this happen, you win a cookie
            return "Thanks for overflowing a float, jerk >:["
        if desc:

@@ -1,5 +1,6 @@
 # Plugin by GhettoWizard and Scaevolus
 import re
 from util import hook
 from util import http

@@ -9,7 +9,7 @@ def domainr(inp):
     except (http.URLError, http.HTTPError) as e:
         return "Unable to get data for some reason. Try again later."
     if data['query'] == "":
-        return "An error occurrred: {status} - {message}".format(**data['error'])
+        return "An error occurred: {status} - {message}".format(**data['error'])
     domains = ""
     for domain in data['results']:
         domains += ("\x034" if domain['availability'] == "taken" else (

@@ -1,6 +1,8 @@
-from util import hook, http, text
 import re
+from util import hook, http, text
 api_url = "http://encyclopediadramatica.se/api.php?action=opensearch"
 ed_url = "http://encyclopediadramatica.se/"

@@ -1,6 +1,8 @@
 import random
 from util import hook, text
 color_codes = {
     "<r>": "\x02\x0305",
     "<g>": "\x02\x0303",
@@ -13,7 +15,7 @@ with open("./data/8ball_responses.txt") as f:
 @hook.command('8ball')
-def eightball(input, action=None):
+def eightball(inp, action=None):
     """8ball <question> -- The all knowing magic eight ball,
     in electronic form. Ask and it shall be answered!"""

@@ -1,13 +1,15 @@
-from util import hook
-from Crypto import Random
-from Crypto.Cipher import AES
-from Crypto.Protocol.KDF import PBKDF2
 import os
 import base64
 import json
 import hashlib
+from Crypto import Random
+from Crypto.Cipher import AES
+from Crypto.Protocol.KDF import PBKDF2
+from util import hook
 # helper functions to pad and unpad a string to a specified block size
 # <http://stackoverflow.com/questions/12524994/encrypt-decrypt-using-pycrypto-aes-256>
 BS = AES.block_size

@@ -2,7 +2,7 @@ from util import hook, http, web
 @hook.command(autohelp=False)
-def fact(inp, say=False, nick=False):
+def fact(inp):
     """fact -- Gets a random fact from OMGFACTS."""
     attempts = 0
@@ -20,10 +20,10 @@ def fact(inp, say=False, nick=False):
         response = soup.find('a', {'class': 'surprise'})
         link = response['href']
-        fact = ''.join(response.find(text=True))
-        if fact:
-            fact = fact.strip()
+        fact_data = ''.join(response.find(text=True))
+        if fact_data:
+            fact_data = fact_data.strip()
             break
         else:
             if attempts > 2:
@@ -34,4 +34,4 @@ def fact(inp, say=False, nick=False):
     url = web.try_isgd(link)
-    return "{} - {}".format(fact, url)
+    return "{} - {}".format(fact_data, url)

@@ -1,8 +1,10 @@
 # Written by Scaevolus 2010
-from util import hook, http, text, pyexec
 import string
 import re
+from util import hook, http, text, pyexec
 re_lineends = re.compile(r'[\r\n]*')
 # some simple "shortcodes" for formatting purposes

@@ -1,6 +1,8 @@
-from util import hook, http
 from urllib import quote_plus
+from util import hook, http
 api_url = "http://api.fishbans.com/stats/{}/"
@@ -51,6 +53,5 @@ def bancount(inp):
     if not out:
         return "The user \x02{}\x02 has no bans.".format(user)
     else:
-        # dat string.
         return "Bans for \x02{}\x02: ".format(user) + ", ".join(out) + ". More info " \
                "at {}".format(user_url)

@@ -8,9 +8,9 @@ def refresh_cache():
     soup = http.get_soup('http://www.fmylife.com/random/')
     for e in soup.find_all('div', {'class': 'post article'}):
-        id = int(e['id'])
+        fml_id = int(e['id'])
         text = ''.join(e.find('p').find_all(text=True))
-        fml_cache.append((id, text))
+        fml_cache.append((fml_id, text))
 # do an initial refresh of the cache
 refresh_cache()
@@ -21,9 +21,9 @@ def fml(inp, reply=None):
     """fml -- Gets a random quote from fmyfife.com."""
     # grab the last item in the fml cache and remove it
-    id, text = fml_cache.pop()
+    fml_id, text = fml_cache.pop()
     # reply with the fml we grabbed
-    reply('(#{}) {}'.format(id, text))
+    reply('(#{}) {}'.format(fml_id, text))
     # refresh fml cache if its getting empty
     if len(fml_cache) < 3:
         refresh_cache()

@@ -1,7 +1,10 @@
-from util import hook
 import random
+from util import hook
 with open("./data/fortunes.txt") as f:
     fortunes = [line.strip() for line in f.readlines()
                 if not line.startswith("//")]

@@ -1,10 +1,13 @@
-from util import hook, http
-import pygeoip
 import os.path
 import json
 import gzip
 from StringIO import StringIO
+import pygeoip
+from util import hook, http
 # load region database
 with open("./data/geoip_regions.json", "rb") as f:

@@ -1,7 +1,9 @@
-from util import hook, http
 import json
 import urllib2
+from util import hook, http
 shortcuts = {"cloudbot": "ClouDev/CloudBot"}

@@ -1,4 +1,5 @@
 import random
 from util import hook, http, text

@@ -8,6 +8,7 @@ import re
 from util import hook, http
 max_length = 100

@@ -1,9 +1,10 @@
 import re
 from util import hook
 @hook.command(autohelp=False)
-def help(inp, notice=None, input=None, conn=None, bot=None):
+def help(inp, notice=None, conn=None, bot=None):
     """help -- Gives a list of commands/help for a command."""
     funcs = {}

@@ -1,7 +1,9 @@
-from util import hook, http, timeformat
 from urllib import urlencode
 import re
+from util import hook, http, timeformat
 hulu_re = (r'(.*://)(www.hulu.com|hulu.com)(.*)', re.I)
@@ -10,7 +12,7 @@ def hulu_url(match):
     data = http.get_json("http://www.hulu.com/api/oembed.json?url=http://www.hulu.com" + match.group(3))
     showname = data['title'].split("(")[-1].split(")")[0]
     title = data['title'].split(" (")[0]
-    return "{}: {} - {}".format(showname, title, timeformat.timeformat(int(data['duration'])))
+    return "{}: {} - {}".format(showname, title, timeformat.format_time(int(data['duration'])))
 @hook.command('hulu')
@@ -21,7 +23,7 @@ def hulu_search(inp):
     data = result.find('results').find('videos').find('video')
     showname = data.find('show').find('name').text
     title = data.find('title').text
-    duration = timeformat.timeformat(int(float(data.find('duration').text)))
+    duration = timeformat.format_time(int(float(data.find('duration').text)))
     description = data.find('description').text
     rating = data.find('content-rating').text
     return "{}: {} - {} - {} ({}) {}".format(showname, title, description, duration, rating,

@@ -1,7 +1,8 @@
 import json
-from util import hook
 from fnmatch import fnmatch
+from util import hook
 #@hook.sieve
 def ignore_sieve(bot, input, func, type, args):

@@ -1,8 +1,10 @@
 # IMDb lookup plugin by Ghetto Wizard (2011) and blha303 (2013)
-from util import hook, http, text
 import re
+from util import hook, http, text
 id_re = re.compile("tt\d+")
 imdb_re = (r'(.*:)//(imdb.com|www.imdb.com)(:[0-9]+)?(.*)', re.I)

@@ -1,7 +1,8 @@
-from util import hook, http, web
 import re
 import random
+from util import hook, http, web
 base_url = "http://reddit.com/r/{}/.json"
 imgur_re = re.compile(r'http://(?:i\.)?imgur\.com/(a/)?(\w+\b(?!/))\.?\w?')
@@ -18,7 +19,7 @@ def is_valid(data):
 @hook.command(autohelp=False)
 def imgur(inp):
-    "imgur [subreddit] -- Gets the first page of imgur images from [subreddit] and returns a link to them. If [subreddit] is undefined, return any imgur images"
+    """imgur [subreddit] -- Gets the first page of imgur images from [subreddit] and returns a link to them. If [subreddit] is undefined, return any imgur images"""
     if inp:
         # see if the input ends with "nsfw"
         show_nsfw = inp.endswith(" nsfw")

@@ -5,7 +5,7 @@ from util import hook, http, urlnorm
 @hook.command
 def isup(inp):
-    "isup -- uses isup.me to see if a site is up or not"
+    """isup -- uses isup.me to see if a site is up or not"""
     # slightly overcomplicated, esoteric URL parsing
     scheme, auth, path, query, fragment = urlparse.urlsplit(inp.strip())

@@ -1,6 +1,7 @@
-from util import hook, http
 import re
+from util import hook, http
 @hook.command(autohelp=False)
 def kernel(inp, reply=None):

@@ -1,6 +1,7 @@
-from util import hook, textgen
 import json
+from util import hook, textgen
 def get_generator(_json, variables):
     data = json.loads(_json)

@@ -1,6 +1,8 @@
-from util import hook, http, timesince
 from datetime import datetime
+from util import hook, http, timesince
 api_url = "http://ws.audioscrobbler.com/2.0/?format=json"

@@ -12,12 +12,8 @@ def metacritic(inp):
     """mc [all|movie|tv|album|x360|ps3|pc|gba|ds|3ds|wii|vita|wiiu|xone|ps4] <title>
     Gets rating for <title> from metacritic on the specified medium."""
-    # if the results suck, it's metacritic's fault
     args = inp.strip()
     game_platforms = ('x360', 'ps3', 'pc', 'gba', 'ds', '3ds', 'wii',
                       'vita', 'wiiu', 'xone', 'ps4')
@@ -45,38 +41,7 @@ def metacritic(inp):
     except HTTPError:
         return 'error fetching results'
-    ''' result format:
-    -- game result, with score
-    -- subsequent results are the same structure, without first_result class
-    <li class="result first_result">
-        <div class="result_type">
-            <strong>Game</strong>
-            <span class="platform">WII</span>
-        </div>
-        <div class="result_wrap">
-            <div class="basic_stats has_score">
-                <div class="main_stats">
-                    <h3 class="product_title basic_stat">...</h3>
-                    <div class="std_score">
-                        <div class="score_wrap">
-                            <span class="label">Metascore: </span>
-                            <span class="data metascore score_favorable">87</span>
-                        </div>
-                    </div>
-                </div>
-                <div class="more_stats extended_stats">...</div>
-            </div>
-        </div>
-    </li>
-    -- other platforms are the same basic layout
-    -- if it doesn't have a score, there is no div.basic_score
-    -- the <div class="result_type"> changes content for non-games:
-    <div class="result_type"><strong>Movie</strong></div>
-    '''
     # get the proper result element we want to pull data from
     result = None
     if not doc.find_class('query_results'):

@@ -1,7 +1,9 @@
-from util import hook, http, web, text
 import time
 import random
+from util import hook, http, web, text
 ## CONSTANTS
 base_url = "http://api.bukget.org/3/"
@@ -13,7 +15,7 @@ details_url = base_url + "plugins/bukkit/{}"
 categories = http.get_json("http://api.bukget.org/3/categories")
 count_total = sum([cat["count"] for cat in categories])
-count_categores = {cat["name"].lower() : int(cat["count"]) for cat in categories} # dict conps!
+count_categories = {cat["name"].lower(): int(cat["count"]) for cat in categories} # dict comps!
 class BukgetError(Exception):
@@ -88,7 +90,7 @@ def format_output(data):
     current_version = data['versions'][0]
     last_update = time.strftime('%d %B %Y %H:%M',
                                 time.gmtime(current_version['date']))
     version_number = data['versions'][0]['version']
     bukkit_versions = ", ".join(current_version['game_versions'])
@@ -99,7 +101,8 @@ def format_output(data):
     else:
         line_a = u"\x02{}\x02, by \x02{}\x02 ({}) \x02{}".format(name, authors, stage, url)
-    line_b = u"Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(version_number, bukkit_versions, last_update, link)
+    line_b = u"Last release: \x02v{}\x02 for \x02{}\x02 at {} \x02{}\x02".format(version_number, bukkit_versions,
+                                                                                 last_update, link)
     return line_a, line_b

@@ -1,9 +1,11 @@
 """ plugin by _303 (?)
 """
-from util import hook
 import re
+from util import hook
 pattern = re.compile(r'^(?P<count>\d+)x (?P<name>.+?): (?P<ingredients>.*)$')
 recipelist = []
@@ -42,29 +44,29 @@ with open("./data/itemids.txt") as f:
         if line.startswith("//"):
             continue
         parts = line.strip().split()
-        id = parts[0]
+        itemid = parts[0]
         name = " ".join(parts[1:])
-        ids.append((id, name))
+        ids.append((itemid, name))
 @hook.command("mcid")
 @hook.command
-def mcitem(input, reply=None):
+def mcitem(inp, reply=None):
     """mcitem <item/id> -- gets the id from an item or vice versa"""
-    input = input.lower().strip()
-    if input == "":
+    inp = inp.lower().strip()
+    if inp == "":
         reply("error: no input.")
         return
     results = []
-    for id, name in ids:
-        if input == id:
-            results = ["\x02[{}]\x02 {}".format(id, name)]
+    for item_id, item_name in ids:
+        if inp == item_id:
+            results = ["\x02[{}]\x02 {}".format(item_id, item_name)]
             break
-        elif input in name.lower():
-            results.append("\x02[{}]\x02 {}".format(id, name))
+        elif inp in item_name.lower():
+            results.append("\x02[{}]\x02 {}".format(item_id, item_name))
     if not results:
         return "No matches found."
@@ -80,12 +82,12 @@ def mcitem(input, reply=None):
 @hook.command("mccraft")
 @hook.command
-def mcrecipe(input, reply=None):
+def mcrecipe(inp, reply=None):
     """mcrecipe <item> -- gets the crafting recipe for an item"""
-    input = input.lower().strip()
+    inp = inp.lower().strip()
     results = [recipe.line for recipe in recipelist
-               if input in recipe.output]
+               if inp in recipe.output]
     if not results:
         return "No matches found."

@@ -1,8 +1,11 @@
-from util import hook
+# TODO: Rewrite this whole mess
 import socket
 import struct
 import json
+from util import hook
 try:
     import DNS
     # Please remember to install the dependancy 'pydns'
@@ -106,7 +109,7 @@ def mcping_legacy(host, port):
 def get_srv_data(domain):
     """ takes a domain and finds minecraft SRV records """
-    DNS.ParseResolvConf()
+    DNS.DiscoverNameServers()
     srv_req = DNS.Request(qtype='srv')
     srv_result = srv_req.req('_minecraft._tcp.{}'.format(domain))
@@ -137,10 +140,10 @@ def parse_input(inp):
 @hook.command("mcp6")
 def mcping6(inp):
     """mcping6 <server>[:port] - Ping a Minecraft server version 1.6 or smaller to check status."""
-    try:
-        host, port = parse_input(inp)
-    except Exception as ex:
-        return ex.args[0]
+    #try:
+    host, port = parse_input(inp)
+    #except Exception as ex:
+    #    return ex.args[0]
     try:
         return mcping_legacy(host, port)
     except:
@@ -165,10 +168,12 @@ def mcping7(inp):
 @hook.command("mcp")
 def mcping(inp):
     """mcping <server>[:port] - Ping a Minecraft server to check status."""
-    try:
-        host, port = parse_input(inp)
-    except Exception as e:
-        return e.args[0]
+    # try:
+    host, port = parse_input(inp)
+    #except Exception as e:
+    #    return e.args[0]
+    #
     try:
         return mcping_modern(host, port)

@@ -1,6 +1,7 @@
-from util import hook, http
 import json
+from util import hook, http
 @hook.command(autohelp=False)
 def mcstatus(inp):

@@ -1,6 +1,8 @@
-from util import hook, http, text
 import re
+from util import hook, http, text
 api_url = "http://minecraft.gamepedia.com/api.php?action=opensearch"
 mc_url = "http://minecraft.gamepedia.com/"
@@ -21,7 +23,7 @@ def mcwiki(inp):
         return "No results found."
     # we remove items with a '/' in the name, because
-    # gamepedia uses subpages for different languages
+    # gamepedia uses sub-pages for different languages
     # for some stupid reason
     items = [item for item in j[1] if not "/" in item]

@@ -1,6 +1,7 @@
 # Plugin by Infinity - <https://github.com/infinitylabs/UguuBot>
 import random
 from util import hook, http
@@ -23,11 +24,11 @@ refresh_cache()
 @hook.command(autohelp=False)
 def mlia(inp, reply=None):
-    "mlia -- Gets a random quote from MyLifeIsAverage.com."
+    """mlia -- Gets a random quote from MyLifeIsAverage.com."""
     # grab the last item in the mlia cache and remove it
-    id, text = mlia_cache.pop()
+    mlia_id, text = mlia_cache.pop()
     # reply with the mlia we grabbed
-    reply('({}) {}'.format(id, text))
+    reply('({}) {}'.format(mlia_id, text))
     # refresh mlia cache if its getting empty
     if len(mlia_cache) < 3:
         refresh_cache()

@@ -1,8 +1,8 @@
-# Plugin by Lukeroge
-from util import hook, text, textgen
 import json
 import os
+from util import hook, text, textgen
 GEN_DIR = "./plugins/data/name_files/"
@@ -10,13 +10,13 @@ GEN_DIR = "./plugins/data/name_files/"
 def get_generator(_json):
     data = json.loads(_json)
     return textgen.TextGenerator(data["templates"],
                                  data["parts"], default_templates=data["default_templates"])
 @hook.command(autohelp=False)
 def namegen(inp, notice=None):
-    "namegen [generator] -- Generates some names using the chosen generator. " \
-    "'namegen list' will display a list of all generators."
+    """namegen [generator] -- Generates some names using the chosen generator.
+    'namegen list' will display a list of all generators."""
     # clean up the input
     inp = inp.strip().lower()

@@ -1,7 +1,9 @@
-from util import hook, http, text, web
 import json
 import re
+from util import hook, http, text, web
 ## CONSTANTS
 ITEM_URL = "http://www.newegg.com/Product/Product.aspx?Item={}"
@@ -21,11 +23,11 @@ def format_item(item, show_url=True):
     # format the rating nicely if it exists
     if not item["ReviewSummary"]["TotalReviews"] == "[]":
         rating = "Rated {}/5 ({} ratings)".format(item["ReviewSummary"]["Rating"],
                                                   item["ReviewSummary"]["TotalReviews"][1:-1])
     else:
         rating = "No Ratings"
     if not item["FinalPrice"] == item["OriginalPrice"]:
         price = "{FinalPrice}, was {OriginalPrice}".format(**item)
     else:
         price = item["FinalPrice"]
@@ -44,19 +46,19 @@ def format_item(item, show_url=True):
         tags.append("\x02Featured\x02")
     if item["IsShellShockerItem"]:
-        tags.append("\x02SHELL SHOCKER®\x02")
-    # join all the tags together in a comma seperated string ("tag1, tag2, tag3")
+        tags.append(u"\x02SHELL SHOCKER\u00AE\x02")
+    # join all the tags together in a comma separated string ("tag1, tag2, tag3")
     tag_text = u", ".join(tags)
     if show_url:
         # create the item URL and shorten it
         url = web.try_isgd(ITEM_URL.format(item["NeweggItemNumber"]))
         return u"\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating,
                                                         tag_text, url)
     else:
         return u"\x02{}\x02 ({}) - {} - {}".format(title, price, rating,
                                                    tag_text)
 ## HOOK FUNCTIONS
@@ -80,8 +82,8 @@ def newegg(inp):
     # submit the search request
     r = http.get_json(
         'http://www.ows.newegg.com/Search.egg/Advanced',
-        post_data = json.dumps(request)
+        post_data=json.dumps(request)
     )
     # get the first result

@@ -1,6 +1,8 @@
 import re
 from util import hook, http
 newgrounds_re = (r'(.*:)//(www.newgrounds.com|newgrounds.com)(:[0-9]+)?(.*)', re.I)
 valid = set('0123456789')

@@ -1,10 +1,12 @@
-from util import hook
 import re
+from util import hook
 db_inited = False
-def cleanSQL(sql):
+def clean_sql(sql):
     return re.sub(r'\s+', " ", sql).strip()
@@ -20,7 +22,7 @@ def db_init(db):
         """).fetchone()[0] == 1
     if not exists:
-        db.execute(cleanSQL("""
+        db.execute(clean_sql("""
            create virtual table todos using fts4(
                user,
                text,
@@ -91,7 +93,7 @@ def db_search(db, nick, query):
 @hook.command("notes")
 @hook.command
 def note(inp, nick='', chan='', db=None, notice=None, bot=None):
-    "note(s) <add|del|list|search> args -- Manipulates your list of notes."
+    """note(s) <add|del|list|search> args -- Manipulates your list of notes."""
     db_init(db)

@@ -1,6 +1,8 @@
-from util import hook, http, web
 from bs4 import BeautifulSoup
+from util import hook, http, web
 api_url = "http://osrc.dfm.io/{}/stats"
 user_url = "http://osrc.dfm.io/{}"

@@ -1,8 +1,10 @@
-# based on password generation code by TheNoodle
-from util import hook
+# TODO: Add some kind of pronounceable password generation
+# TODO: Improve randomness
 import string
 import random
+from util import hook
 @hook.command
 def password(inp, notice=None):
@@ -39,10 +41,10 @@ def password(inp, notice=None):
     if not okay:
         okay = okay + list(string.ascii_lowercase)
-    password = ""
+    pw = ""
     # generates password
     for x in range(length):
-        password = password + random.choice(okay)
-    notice(password)
+        pw = pw + random.choice(okay)
+    notice(pw)

@@ -1,9 +1,11 @@
 # ping plugin by neersighted
-from util import hook
 import subprocess
 import re
 import os
+from util import hook
 ping_regex = re.compile(r"(\d+.\d+)/(\d+.\d+)/(\d+.\d+)/(\d+.\d+)")
@@ -13,6 +15,7 @@ def ping(inp, reply=None):
     if os.name == "nt":
         return "Sorry, this command is not supported on Windows systems."
+    # TODO: Rewrite this entire command to work on Windows, somehow
     args = inp.split(' ')
     host = args[0]

@@ -1,8 +1,10 @@
 # coding=utf-8
-from util import hook
 import re
 import random
+from util import hook
 potatoes = ['AC Belmont', 'AC Blue Pride', 'AC Brador', 'AC Chaleur', 'AC Domino', 'AC Dubuc', 'AC Glacier Chip',
             'AC Maple Gold', 'AC Novachip', 'AC Peregrine Red', 'AC Ptarmigan', 'AC Red Island', 'AC Saguenor',
             'AC Stampede Russet', 'AC Sunbury', 'Abeille', 'Abnaki', 'Acadia', 'Acadia Russet', 'Accent',
@@ -51,4 +53,4 @@ def potato(inp, action=None, input=None):
     side_dish = random.choice(['side salad', 'dollop of sour cream', 'piece of chicken', 'bowl of shredded bacon'])
     action("{} a {} {} {} potato for {} and serves it with a small {}!".format(method, flavor, size, potato_type, inp,
                                                                                side_dish))

@@ -1,11 +1,12 @@
-from util import hook, http, timesince
 import datetime
+from util import hook, http, timesince
 @hook.command("scene")
 @hook.command
 def pre(inp):
-    'pre <query> -- searches scene releases using orlydb.com'
+    """pre <query> -- searches scene releases using orlydb.com"""
     try:
         h = http.get_html("http://orlydb.com/", q=inp)

@@ -1,49 +0,0 @@
-import urllib2
-import random
-from util import hook
-def make_string():
-    stuff = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
-    string = random.choice("123")
-    for x in range(4):
-        string += random.choice(stuff)
-    return string
-def check_url(code):
-    try:
-        urllib2.urlopen(make_url(code))
-        return True
-    except:
-        return False # sorry <3
-def make_url(code):
-    return "http://puu.sh/{}".format(code)
-@hook.command(autohelp=False)
-def puush(inp):
-    """puush [1-5] -- Returns a number of random puu.sh entries."""
-    out = ""
-    num = 0
-    if not inp:
-        inp = "1"
-    if not inp.isdigit():
-        out += "Defaulting to one: "
-        num = 1
-    elif int(inp[0]) > 5:
-        out += "Five images max: "
-        num = 5
-    else:
-        num = int(inp[0])
-    images = []
-    for x in xrange(num):
-        ran = make_string()
-        while not check_url(ran):
-            ran = make_string()
-        images.append(make_url(ran))
-    return out + " ".join(images)

@@ -8,9 +8,9 @@ def qrcode(inp):
     """qrcode [link] returns a link for a QR code."""
     args = {
-        "cht": "qr", # chart type
+        "cht": "qr", # chart type (QR)
         "chs": "200x200", # dimensions
-        "chl": inp
+        "chl": inp # data
     }
     link = http.prepare_url("http://chart.googleapis.com/chart", args)

@@ -1,9 +1,11 @@
 import urllib
 import json
 import re
-from util import hook
 import oauth2 as oauth
+from util import hook
 def getdata(inp, types, api_key, api_secret):
     consumer = oauth.Consumer(api_key, api_secret)

@@ -1,9 +1,11 @@
-from util import hook, http, text, timesince
 from datetime import datetime
 import re
 import random
-reddit_re = (r'.*((www\.)?reddit\.com/r[^ ]+)', re.I)
+from util import hook, http, text, timesince
+reddit_re = (r'.*(((www\.)?reddit\.com/r|redd\.it)[^ ]+)', re.I)
 base_url = "http://reddit.com/r/{}/.json"
 short_url = "http://redd.it/{}"
@@ -52,7 +54,7 @@ def reddit(inp):
     data = data["data"]["children"]
     # get the requested/random post
-    if id_num != None:
+    if id_num is not None:
         try:
             item = data[id_num]["data"]
         except IndexError:
@@ -64,14 +66,14 @@ def reddit(inp):
     item["title"] = text.truncate_str(item["title"], 50)
     item["link"] = short_url.format(item["id"])
-    rawtime = datetime.fromtimestamp(int(item["created_utc"]))
-    item["timesince"] = timesince.timesince(rawtime)
+    raw_time = datetime.fromtimestamp(int(item["created_utc"]))
+    item["timesince"] = timesince.timesince(raw_time)
     if item["over_18"]:
         item["warning"] = " \x02NSFW\x02"
     else:
         item["warning"] = ""
-    return u'\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02' \
-           ' {timesince} ago - {ups} upvotes, {downs} downvotes -' \
-           ' {link}{warning}'.format(**item)
+    return u"\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \
+           " {timesince} ago - {ups} upvotes, {downs} downvotes -" \
+           " {link}{warning}".format(**item)

plugins/regex_chans.py (new file, 128 lines)

@@ -0,0 +1,128 @@
+from util import hook
+# Default value.
+# If True, all channels without a setting will have regex enabled
+# If False, all channels without a setting will have regex disabled
+default_enabled = True
+db_already_initiated = False
+def db_init(db):
+    global db_already_initiated
+    if not db_already_initiated:
+        db_already_initiated = True
+        db.execute("CREATE TABLE IF NOT EXISTS regexchans(channel PRIMARY KEY, status)")
+        db.commit()
+def get_status(db, channel):
+    row = db.execute("SELECT status FROM regexchans WHERE channel = ?", [channel]).fetchone()
+    if row:
+        return row[0]
+    else:
+        return None
+def set_status(db, channel, status):
+    row = db.execute("REPLACE INTO regexchans (channel, status) VALUES(?, ?)", [channel, status])
+    db.commit()
+def delete_status(db, channel):
+    row = db.execute("DELETE FROM regexchans WHERE channel = ?", [channel])
+    db.commit()
+def list_status(db):
+    row = db.execute("SELECT * FROM regexchans").fetchall()
+    result = None
+    for values in row:
+        if result:
+            result += u", {}: {}".format(values[0], values[1])
+        else:
+            result = u"{}: {}".format(values[0], values[1])
+    return result
+@hook.sieve
+def sieve_regex(bot, inp, func, kind, args):
+    db = bot.get_db_connection(inp.conn)
+    db_init(db)
+    if kind == 'regex' and inp.chan.startswith("#") and func.__name__ != 'factoid':
+        chanstatus = get_status(db, inp.chan)
+        if chanstatus != "ENABLED" and (chanstatus == "DISABLED" or not default_enabled):
+            print u"Denying input.raw={}, kind={}, args={} from {}".format(inp.raw, kind, args, inp.chan)
+            return None
+        print u"Allowing input.raw={}, kind={}, args={} from {}".format(inp.raw, kind, args, inp.chan)
+    return inp
+@hook.command(permissions=["botcontrol"])
+def enableregex(inp, db=None, message=None, notice=None, chan=None, nick=None):
+    db_init(db)
+    inp = inp.strip().lower()
+    if not inp:
+        channel = chan
+    elif inp.startswith("#"):
+        channel = inp
+    else:
+        channel = u"#{}".format(inp)
+    message(u"Enabling regex matching (youtube, etc) (issued by {})".format(nick), target=channel)
+    notice(u"Enabling regex matching (youtube, etc) in channel {}".format(channel))
+    set_status(db, channel, "ENABLED")
+@hook.command(permissions=["botcontrol"])
+def disableregex(inp, db=None, message=None, notice=None, chan=None, nick=None):
+    db_init(db)
+    inp = inp.strip().lower()
+    if not inp:
+        channel = chan
+    elif inp.startswith("#"):
+        channel = inp
+    else:
+        channel = u"#{}".format(inp)
+    message(u"Disabling regex matching (youtube, etc) (issued by {})".format(nick), target=channel)
+    notice(u"Disabling regex matching (youtube, etc) in channel {}".format(channel))
+    set_status(db, channel, "DISABLED")
+@hook.command(permissions=["botcontrol"])
+def resetregex(inp, db=None, message=None, notice=None, chan=None, nick=None):
+    db_init(db)
+    inp = inp.strip().lower()
+    if not inp:
+        channel = chan
+    elif inp.startswith("#"):
+        channel = inp
+    else:
+        channel = u"#{}".format(inp)
+    message(u"Resetting regex matching setting (youtube, etc) (issued by {})".format(nick), target=channel)
+    notice(u"Resetting regex matching setting (youtube, etc) in channel {}".format(channel))
+    delete_status(db, channel)
+@hook.command(permissions=["botcontrol"])
+def regexstatus(inp, db=None, chan=None):
+    db_init(db)
+    inp = inp.strip().lower()
+    if not inp:
+        channel = chan
+    elif inp.startswith("#"):
+        channel = inp
+    else:
+        channel = u"#{}".format(inp)
+    return u"Regex status for {}: {}".format(channel, get_status(db, channel))
+@hook.command(permissions=["botcontrol"])
+def listregex(inp, db=None):
+    db_init(db)
+    return list_status(db)

@@ -36,4 +36,4 @@ def rottentomatoes(inp, bot=None):
     rotten = review_count - fresh
     return u"{} - Critics Rating: \x02{}%\x02 ({} liked, {} disliked) " \
            "Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh, rotten, audience_score, url)

@@ -5,6 +5,7 @@ import re
 from util import hook, timesince
 db_ready = False

@@ -1,6 +1,7 @@
-from util import hook, textgen
 import json
+from util import hook, textgen
 def get_generator(_json, variables):
     data = json.loads(_json)

@@ -1,6 +1,7 @@
-from util import hook, text
 import random
+from util import hook, text
 with open("./data/slogans.txt") as f:
     slogans = [line.strip() for line in f.readlines()

@@ -1,7 +1,9 @@
-from util import hook, http, web, text
 from urllib import urlencode
 import re
+from util import hook, http, web, text
 sc_re = (r'(.*:)//(www.)?(soundcloud.com)(.*)', re.I)
 api_url = "http://api.soundcloud.com"
 sndsc_re = (r'(.*:)//(www.)?(snd.sc)(.*)', re.I)

@@ -1,8 +1,9 @@
-from util import hook
 from enchant.checker import SpellChecker
 import enchant
+from util import hook
 locale = "en_US"
@@ -15,11 +16,11 @@ def spell(inp):
     if len(inp.split(" ")) > 1:
         # input is a sentence
-        chkr = SpellChecker(locale)
-        chkr.set_text(inp)
+        checker = SpellChecker(locale)
+        checker.set_text(inp)
         offset = 0
-        for err in chkr:
+        for err in checker:
             # find the location of the incorrect word
             start = err.wordpos + offset
             finish = start + len(err.word)

@@ -1,7 +1,7 @@
 import re
+from urllib import urlencode
 from util import hook, http, web
-from urllib import urlencode
 gateway = 'http://open.spotify.com/{}/{}' # http spotify gw address
 spuri = 'spotify:{}:{}'

@@ -1,7 +1,9 @@
 import re
+from bs4 import BeautifulSoup, NavigableString, Tag
 from util import hook, http, web
 from util.text import truncate_str
-from bs4 import BeautifulSoup, NavigableString, Tag
 steam_re = (r'(.*:)//(store.steampowered.com)(:[0-9]+)?(.*)', re.I)
@@ -53,10 +55,10 @@ def get_steam_info(url):
             data[title] = text
             continue
     data["price"] = soup.find('div', {'class': 'game_purchase_price price'}).text.strip()
-    return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}, \x02Price\x02: {price}".format(**data)
+    return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
+           u" \x02Price\x02: {price}".format(**data)
 @hook.regex(*steam_re)

@@ -1,7 +1,9 @@
-from util import hook, http, text
 import csv
 import StringIO
+from util import hook, http, text
 gauge_url = "http://www.mysteamgauge.com/search?username={}"
 api_url = "http://mysteamgauge.com/user/{}.csv"

@@ -9,7 +9,7 @@ def stock(inp):
     query = "SELECT * FROM yahoo.finance.quote WHERE symbol=@symbol LIMIT 1"
     quote = web.query(query, {"symbol": sym}).one()
-    # if we dont get a company name back, the symbol doesn't match a company
+    # if we don't get a company name back, the symbol doesn't match a company
     if quote['Change'] is None:
         return "Unknown ticker symbol: {}".format(sym)

@@ -2,9 +2,10 @@ import os
 import re
 import time
 import platform
-from util import hook
 from datetime import timedelta
+from util import hook
 def convert_kilobytes(kilobytes):
     if kilobytes >= 1024:

@@ -50,7 +50,7 @@ def tellinput(paraml, input=None, notice=None, db=None, bot=None, nick=None, con
 @hook.command(autohelp=False)
 def showtells(inp, nick='', chan='', notice=None, db=None):
-    "showtells -- View all pending tell messages (sent in a notice)."
+    """showtells -- View all pending tell messages (sent in a notice)."""
     db_init(db)

@@ -1,7 +1,9 @@
-from util import hook, http
 import time
+from util import hook, http
 from util.text import capitalize_first
 api_url = 'http://api.wolframalpha.com/v2/query?format=plaintext'
@@ -16,10 +18,10 @@ def time_command(inp, bot=None):
         return "error: no wolfram alpha api key set"
     request = http.get_xml(api_url, input=query, appid=api_key)
-    time = " ".join(request.xpath("//pod[@title='Result']/subpod/plaintext/text()"))
-    time = time.replace(" | ", ", ")
-    if time:
+    current_time = " ".join(request.xpath("//pod[@title='Result']/subpod/plaintext/text()"))
+    current_time = current_time.replace(" | ", ", ")
+    if current_time:
         # nice place name for UNIX time
         if inp.lower() == "unix":
             place = "Unix Epoch"
@@ -27,7 +29,7 @@ def time_command(inp, bot=None):
         place = capitalize_first(" ".join(request.xpath("//pod[@"
                                           "title='Input interpretation']/subpod/plaintext/text()"))[
                                  16:])
-        return "{} - \x02{}\x02".format(time, place)
+        return "{} - \x02{}\x02".format(current_time, place)
     else:
         return "Could not get the time for '{}'.".format(inp)

@@ -1,6 +1,7 @@
-from util import hook, http, urlnorm
 from bs4 import BeautifulSoup
+from util import hook, http, urlnorm
 @hook.command
 def title(inp):
@@ -14,9 +15,9 @@ def title(inp):
     except (http.HTTPError, http.URLError):
         return "Could not fetch page."
-    title = soup.find('title').contents[0]
-    if not title:
+    page_title = soup.find('title').contents[0]
+    if not page_title:
         return "Could not find title."
-    return u"{} [{}]".format(title, real_url)
+    return u"{} [{}]".format(page_title, real_url)

@@ -9,11 +9,11 @@ from zipfile import ZipFile
 from cStringIO import StringIO
 from lxml import etree
 from util import hook, http
 base_url = "http://thetvdb.com/api/"
-api_key = "469B73127CA0C411"
 def get_zipped_xml(*args, **kwargs):
@@ -25,11 +25,11 @@ def get_zipped_xml(*args, **kwargs):
     return etree.parse(ZipFile(zip_buffer, "r").open(path))
-def get_episodes_for_series(seriesname, api_key):
+def get_episodes_for_series(series_name, api_key):
     res = {"error": None, "ended": False, "episodes": None, "name": None}
     # http://thetvdb.com/wiki/index.php/API:GetSeries
     try:
-        query = http.get_xml(base_url + 'GetSeries.php', seriesname=seriesname)
+        query = http.get_xml(base_url + 'GetSeries.php', seriesname=series_name)
     except URLError:
         res["error"] = "error contacting thetvdb.com"
         return res
@@ -63,7 +63,7 @@ def get_episode_info(episode, api_key):
     first_aired = episode.findtext("FirstAired")
     try:
-        airdate = datetime.date(*map(int, first_aired.split('-')))
+        air_date = datetime.date(*map(int, first_aired.split('-')))
     except (ValueError, TypeError):
         return None
@@ -79,7 +79,7 @@ def get_episode_info(episode, api_key):
     episode_desc = '{}'.format(episode_num)
     if episode_name:
         episode_desc += ' - {}'.format(episode_name)
-    return first_aired, airdate, episode_desc
+    return first_aired, air_date, episode_desc
 @hook.command
@@ -111,15 +111,15 @@ def tv_next(inp, bot=None):
         if ep_info is None:
             continue
-        (first_aired, airdate, episode_desc) = ep_info
-        if airdate > today:
+        (first_aired, air_date, episode_desc) = ep_info
+        if air_date > today:
             next_eps = ['{} ({})'.format(first_aired, episode_desc)]
-        elif airdate == today:
+        elif air_date == today:
             next_eps = ['Today ({})'.format(episode_desc)] + next_eps
         else:
-            #we're iterating in reverse order with newest episodes last
-            #so, as soon as we're past today, break out of loop
+            # we're iterating in reverse order with newest episodes last
+            # so, as soon as we're past today, break out of loop
             break
     if not next_eps:
@@ -158,9 +158,9 @@ def tv_last(inp, bot=None):
         if ep_info is None:
             continue
-        (first_aired, airdate, episode_desc) = ep_info
-        if airdate < today:
+        (first_aired, air_date, episode_desc) = ep_info
+        if air_date < today:
             #iterating in reverse order, so the first episode encountered
             #before today was the most recently aired
             prev_ep = '{} ({})'.format(first_aired, episode_desc)

View file

@@ -1,7 +1,9 @@
 import re
-from util import hook, http
 from HTMLParser import HTMLParser
+from util import hook, http
 twitch_re = (r'(.*:)//(twitch.tv|www.twitch.tv)(:[0-9]+)?(.*)', re.I)
 multitwitch_re = (r'(.*:)//(www.multitwitch.tv|multitwitch.tv)/(.*)', re.I)

View file

@@ -1,9 +1,12 @@
-from util import hook, timesince
-import tweepy
 import re
 import random
 from datetime import datetime
+import tweepy
+from util import hook, timesince
 TWITTER_RE = (r"(?:(?:www.twitter.com|twitter.com)/(?:[-_a-zA-Z0-9]+)/status/)([0-9]+)", re.I)
@@ -26,7 +29,7 @@ def get_api(bot):
 @hook.regex(*TWITTER_RE)
 def twitter_url(match, bot=None):
     tweet_id = match.group(1)
     api = get_api(bot)
     if not api:
         return
@@ -46,13 +49,13 @@ def twitter_url(match, bot=None):
     time = timesince.timesince(tweet.created_at, datetime.utcnow())
     return u"{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time)
 @hook.command("tw")
 @hook.command("twatter")
 @hook.command
 def twitter(inp, bot=None):
-    "twitter <user> [n] -- Gets last/[n]th tweet from <user>"
+    """twitter <user> [n] -- Gets last/[n]th tweet from <user>"""
     api = get_api(bot)
     if not api:
@@ -117,6 +120,9 @@ def twitter(inp, bot=None):
         tweet = random.choice(search)
         user = tweet.user
+    else:
+        # ???
+        return "Unknown Error"
     text = " ".join(tweet.text.split())

View file

@@ -1,6 +1,8 @@
-from util import hook, http, text
 import re
+from util import hook, http, text
 base_url = 'http://www.urbandictionary.com/iphone/search/define'
@@ -10,37 +12,36 @@ def urban(inp):
     """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""
     # clean and split the input
-    input = inp.lower().strip()
-    parts = input.split()
+    inp = inp.lower().strip()
+    parts = inp.split()
     # if the last word is a number, set the ID to that number
     if parts[-1].isdigit():
-        id = int(parts[-1])
+        id_num = int(parts[-1])
         # remove the ID from the input string
         del parts[-1]
-        input = " ".join(parts)
+        inp = " ".join(parts)
     else:
-        id = 1
+        id_num = 1
     # fetch the definitions
-    page = http.get_json(base_url, term=input, referer="http://m.urbandictionary.com")
+    page = http.get_json(base_url, term=inp, referer="http://m.urbandictionary.com")
-    defs = page['list']
+    definitions = page['list']
-    print page
     if page['result_type'] == 'no_results':
         return 'Not found.'
     # try getting the requested definition
     try:
-        definition = defs[id - 1]['definition'].replace('\r\n', ' ')
+        definition = definitions[id_num - 1]['definition'].replace('\r\n', ' ')
         definition = re.sub('\s+', ' ', definition).strip()  # remove excess spaces
         definition = text.truncate_str(definition, 200)
     except IndexError:
         return 'Not found.'
-    url = defs[id - 1]['permalink']
+    url = definitions[id_num - 1]['permalink']
     output = u"[%i/%i] %s :: %s" % \
-        (id, len(defs), definition, url)
+             (id_num, len(definitions), definition, url)
     return output
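
Note on the renames above (input to inp, id to id_num, defs to definitions): they stop the function from shadowing the Python built-ins input and id, and the stray debug print page is dropped. A small illustrative sketch of the shadowing problem (function names are mine):

def parse_id_shadowing(parts):
    # Illustrative only: re-binding `id` hides the built-in for the rest
    # of the function, so e.g. id(parts) would now raise TypeError.
    id = int(parts[-1])
    return id


def parse_id_fixed(parts):
    # A distinct name leaves the built-in id() untouched.
    id_num = int(parts[-1])
    return id_num, id(parts)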

View file

@@ -1,13 +1,15 @@
-from util import hook, text
 import hashlib
 import collections
 import re
+from util import hook, text
 # variables
 colors = collections.OrderedDict([
     ('red', '\x0304'),
-    ('ornage', '\x0307'),
+    ('orange', '\x0307'),
     ('yellow', '\x0308'),
     ('green', '\x0309'),
     ('cyan', '\x0303'),
@@ -24,8 +26,8 @@ colors = collections.OrderedDict([
 strip_re = re.compile("(\x03|\x02|\x1f)(?:,?\d{1,2}(?:,\d{1,2})?)?", re.UNICODE)
-def strip(text):
-    return strip_re.sub('', text)
+def strip(string):
+    return strip_re.sub('', string)
 # basic text tools
@@ -89,7 +91,7 @@ def checkbase64(inp):
         recoded = decoded.encode('base64').strip()
         is_base64 = recoded == inp
     except:
-        is_base64 = False
+        return '"{}" is not base64 encoded'.format(inp)
     if is_base64:
         return '"{}" is base64 encoded'.format(recoded)

View file

@@ -20,7 +20,7 @@ def validate(inp):
     status = info['x-w3c-validator-status'].lower()
     if status in ("valid", "invalid"):
-        errorcount = info['x-w3c-validator-errors']
+        error_count = info['x-w3c-validator-errors']
-        warningcount = info['x-w3c-validator-warnings']
+        warning_count = info['x-w3c-validator-warnings']
         return "{} was found to be {} with {} errors and {} warnings." \
-            " see: {}".format(inp, status, errorcount, warningcount, url)
+            " see: {}".format(inp, status, error_count, warning_count, url)

View file

@@ -1,7 +1,8 @@
-from util import hook, http, web
 import json
 import urllib2
+from util import hook, http, web
 def get_sound_info(game, search):
     search = search.replace(" ", "+")

View file

@@ -8,7 +8,7 @@ def vimeo_url(match):
                           % match.group(1))
     if info:
-        info[0]["duration"] = timeformat.timeformat(info[0]["duration"])
+        info[0]["duration"] = timeformat.format_time(info[0]["duration"])
         info[0]["stats_number_of_likes"] = format(
             info[0]["stats_number_of_likes"], ",d")
         info[0]["stats_number_of_plays"] = format(

View file

@@ -13,7 +13,7 @@ def weather(inp, reply=None, db=None, nick=None, bot=None, notice=None):
     if not api_key:
         return "Error: No wunderground API details."
-    # initalise weather DB
+    # initialise weather DB
     db.execute("create table if not exists weather(nick primary key, loc)")
     # if there is no input, try getting the users last location from the DB

View file

@@ -9,7 +9,6 @@ from util import hook, http, text, web
 @hook.command
 def wolframalpha(inp, bot=None):
     """wa <query> -- Computes <query> using Wolfram Alpha."""
     api_key = bot.config.get("api_keys", {}).get("wolframalpha", None)
     if not api_key:

View file

@@ -11,6 +11,6 @@ def answer(inp):
     short_url = web.try_isgd(result["Link"])
     # we split the answer and .join() it to remove newlines/extra spaces
-    answer = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
+    answer_text = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
-    return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer, short_url)
+    return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url)
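
Note on the comment in this hunk: ' '.join(value.split()) is the usual idiom for collapsing newlines and repeated whitespace before truncating. A tiny standalone illustration (the truncation here is a crude stand-in for the bot's text.truncate_str, not the real helper):

def squash_whitespace(s, limit=80):
    # str.split() with no argument splits on any whitespace run, so joining
    # with single spaces flattens newlines, tabs and double spaces.
    flat = ' '.join(s.split())
    # crude stand-in for text.truncate_str from the bot's util package
    return flat if len(flat) <= limit else flat[:limit - 3] + '...'

print(squash_whitespace("an  answer\nwith   messy\twhitespace"))
# -> "an answer with messy whitespace"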

View file

@@ -14,10 +14,7 @@ video_url = "http://youtu.be/%s"
 def plural(num=0, text=''):
-    return "{:,} {}{}".format(num, text, "s"[num==1:])
+    return "{:,} {}{}".format(num, text, "s"[num == 1:])
 def get_video_description(video_id):
@@ -41,22 +38,23 @@ def get_video_description(video_id):
         likes = plural(int(data['likeCount']), "like")
         dislikes = plural(data['ratingCount'] - int(data['likeCount']), "dislike")
-        percent = 100 * float(data['likeCount'])/float(data['ratingCount'])
+        percent = 100 * float(data['likeCount']) / float(data['ratingCount'])
         out += u' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
                                                      dislikes, percent)
     if 'viewCount' in data:
         views = data['viewCount']
-        out += u' - \x02{:,}\x02 view{}'.format(views, "s"[views==1:])
+        out += u' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])
     try:
-        uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"]["$t"]
+        uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"][
+            "$t"]
     except:
         uploader = data["uploader"]
     upload_time = time.strptime(data['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z")
     out += u' - \x02{}\x02 on \x02{}\x02'.format(uploader,
                                                  time.strftime("%Y.%m.%d", upload_time))
     if 'contentRating' in data:
         out += u' - \x034NSFW\x02'
@@ -88,7 +86,6 @@ def youtube(inp):
     return get_video_description(video_id) + u" - " + video_url % video_id
 @hook.command('ytime')
 @hook.command
 def youtime(inp):
@@ -119,8 +116,8 @@ def youtime(inp):
     total_text = timeformat.format_time(total, accuracy=8)
     return u'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
-        'a total run time of {}!'.format(data['title'], length_text, views, \
-        total_text)
+           u'a total run time of {}!'.format(data['title'], length_text, views,
+                                             total_text)
@@ -135,6 +132,6 @@ def ytplaylist_url(match):
         return "\x034\x02Invalid response."
     title = soup.find('title').text.split('-')[0].strip()
     author = soup.find('img', {'class': 'channel-header-profile-image'})['title']
-    numvideos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
+    num_videos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
     views = soup.find('ul', {'class': 'header-stats'}).findAll('li')[1].text.split(' ')[0]
-    return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, numvideos, author)
+    return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, num_videos, author)

View file

@@ -1,3 +1,4 @@
+Crypto
 BeautifulSoup==3.2.1
 lxml==3.1beta1
 pyenchant==1.6.5