Moved broken/abandoned plugins, take two
This commit is contained in:
parent 37475b364c
commit bddb75d033
3 changed files with 305 additions and 0 deletions
33 disabled_plugins/suggest.py Normal file
@@ -0,0 +1,33 @@
import json
import random
import re

from util import hook, http


@hook.command
def suggest(inp, inp_unstripped=''):
    ".suggest [#n] <phrase> -- gets a random/the nth suggested google search"
    inp = inp_unstripped
    m = re.match(r'^#(\d+) (.+)$', inp)
    if m:
        num, inp = m.groups()
        num = int(num)
        if num > 10:
            return 'I can only get the first ten suggestions.'
    else:
        num = 0

    page = http.get('http://google.com/complete/search', q=inp)
    # the response is JSONP; drop the function wrapper and trailing ")"
    page_json = page.split('(', 1)[1][:-1]
    suggestions = json.loads(page_json)[1]
    if not suggestions:
        return 'No suggestions found :('
    if num:
        if len(suggestions) + 1 <= num:
            return 'I only got %d suggestions.' % len(suggestions)
        out = suggestions[num - 1]
    else:
        out = random.choice(suggestions)
    return '#%d: %s' % (int(out[2][0]) + 1, out[0])
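For context, suggest.py leans on the JSONP form of Google's suggest endpoint and strips the wrapper by hand. A minimal Python 3 sketch of the same idea, assuming the public suggestqueries endpoint with client=firefox (which returns plain JSON, so no wrapper-stripping is needed):

import json
import random
import urllib.parse
import urllib.request

def suggest(phrase, n=0):
    # client=firefox is assumed to return plain JSON: ["query", ["s1", "s2", ...]]
    url = ('https://suggestqueries.google.com/complete/search?client=firefox&q='
           + urllib.parse.quote(phrase))
    with urllib.request.urlopen(url) as resp:
        suggestions = json.loads(resp.read().decode('utf-8', 'replace'))[1]
    if not suggestions:
        return 'No suggestions found :('
    return suggestions[n - 1] if n else random.choice(suggestions)

For example, suggest('python', 1) would return the top completion for "python".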
192 disabled_plugins/translate.py Normal file
@@ -0,0 +1,192 @@
import htmlentitydefs
import re

from util import hook, http


########### from http://effbot.org/zone/re-sub.htm#unescape-html #############


def unescape(text):
    def fixup(m):
        text = m.group(0)
        if text[:2] == "&#":
            # character reference
            try:
                if text[:3] == "&#x":
                    return unichr(int(text[3:-1], 16))
                else:
                    return unichr(int(text[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
            except KeyError:
                pass
        return text  # leave as is

    return re.sub(r"&#?\w+;", fixup, text)

##############################################################################
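Worth noting: on Python 3 the hand-rolled unescape helper above collapses into the standard library. A sketch of the equivalent using html.unescape, which handles both numeric and named references:

import html

def unescape(text):
    # covers &#233; (decimal), &#xE9; (hex) and &eacute; (named) alike
    return html.unescape(text)

assert unescape('&eacute;&#233;&#xE9;') == '\xe9' * 3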
def goog_trans(text, slang, tlang):
    url = 'http://ajax.googleapis.com/ajax/services/language/translate?v=1.0&key=ABQIAAAAGjLiqTxkFw7F24ITXc4bNRS04yDz5pgaUTdxja2Sk3UoWlae7xTXom3fBzER6Upo8jfzcTtvz-8ebQ'
    parsed = http.get_json(url, q=text, langpair=(slang + '|' + tlang))
    if not 200 <= parsed['responseStatus'] < 300:
        raise IOError('error with the translation server: %d: %s' % (
            parsed['responseStatus'], parsed['responseDetails']))
    if not slang:
        return unescape('(%(detectedSourceLanguage)s) %(translatedText)s' %
                        (parsed['responseData']))
    return unescape(parsed['responseData']['translatedText'])


def match_language(fragment):
    fragment = fragment.lower()
    # first pass: match a language code (e.g. "de", "zh-CN")
    for short, _ in lang_pairs:
        if fragment in short.lower().split():
            return short.split()[0]

    # second pass: match a substring of the full language name
    for short, full in lang_pairs:
        if fragment in full.lower():
            return short.split()[0]

    return None
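The two-pass lookup gives exact language codes priority over name substrings. A quick illustration (assuming the lang_pairs table defined at the bottom of this file):

print(match_language('de'))       # 'de'    -- code match, first pass
print(match_language('german'))   # 'de'    -- name substring, second pass
print(match_language('zh'))       # 'zh-CN' -- first token of the "zh-CN zh" entry
print(match_language('klingon'))  # None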
@hook.command
def translate(inp):
    '.translate [source language [target language]] <sentence> -- translates' \
        ' <sentence> from source language (default autodetect) to target' \
        ' language (default English) using Google Translate'
    return "Due to Google deprecating the translation API, this command is no longer available :("

    # unreachable: the early return above disables the old implementation
    args = inp.split(' ', 2)

    try:
        if len(args) >= 2:
            sl = match_language(args[0])
            if not sl:
                return goog_trans(inp, '', 'en')
            if len(args) >= 3:
                tl = match_language(args[1])
                if not tl:
                    if sl == 'en':
                        return 'unable to determine desired target language'
                    return goog_trans(args[1] + ' ' + args[2], sl, 'en')
                return goog_trans(args[2], sl, tl)
        return goog_trans(inp, '', 'en')
    except IOError, e:
        return e
languages = 'ja fr de ko ru zh'.split()
language_pairs = zip(languages[:-1], languages[1:])  # note: not referenced elsewhere in this file


def babel_gen(inp):
    # round-trip the input through each language and back to English,
    # yielding (language, translation, round-tripped English) at each step
    for language in languages:
        inp = inp.encode('utf8')
        trans = goog_trans(inp, 'en', language).encode('utf8')
        inp = goog_trans(trans, language, 'en')
        yield language, trans, inp


@hook.command
def babel(inp):
    ".babel <sentence> -- translates <sentence> through multiple languages"
    return "Due to Google deprecating the translation API, this command is no longer available :("

    # unreachable after the early return above
    try:
        return list(babel_gen(inp))[-1][2]
    except IOError, e:
        return e
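babel_gen is the interesting piece here: it round-trips the input through each language and back to English, threading the result into the next iteration. A self-contained sketch with a stub translator (fake_trans is made up purely to show the generator's contract):

def fake_trans(text, slang, tlang):
    # stand-in for goog_trans; just records the hop
    return '[%s->%s] %s' % (slang, tlang, text)

def babel_gen_demo(inp, langs=('ja', 'fr')):
    for language in langs:
        trans = fake_trans(inp, 'en', language)
        inp = fake_trans(trans, language, 'en')
        yield language, trans, inp

for lang, trans, back in babel_gen_demo('hello'):
    print('%s | %s | %s' % (lang, trans, back))
# ja | [en->ja] hello | [ja->en] [en->ja] hello
# fr | [en->fr] [ja->en] [en->ja] hello | [fr->en] [en->fr] [ja->en] [en->ja] hello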
@hook.command
def babelext(inp):
    ".babelext <sentence> -- like .babel, but with more detailed output"
    return "Due to Google deprecating the translation API, this command is no longer available :("

    # unreachable after the early return above
    try:
        babels = list(babel_gen(inp))
    except IOError, e:
        return e

    out = u''
    for lang, trans, text in babels:
        out += '%s:"%s", ' % (lang, text.decode('utf8'))

    out += 'en:"' + babels[-1][2].decode('utf8') + '"'

    if len(out) > 300:
        out = out[:150] + ' ... ' + out[-150:]

    return out
lang_pairs = [
    ("no", "Norwegian"),
    ("it", "Italian"),
    ("ht", "Haitian Creole"),
    ("af", "Afrikaans"),
    ("sq", "Albanian"),
    ("ar", "Arabic"),
    ("hy", "Armenian"),
    ("az", "Azerbaijani"),
    ("eu", "Basque"),
    ("be", "Belarusian"),
    ("bg", "Bulgarian"),
    ("ca", "Catalan"),
    ("zh-CN zh", "Chinese"),
    ("hr", "Croatian"),
    ("cs", "Czech"),
    ("da", "Danish"),
    ("nl", "Dutch"),
    ("en", "English"),
    ("et", "Estonian"),
    ("tl", "Filipino"),
    ("fi", "Finnish"),
    ("fr", "French"),
    ("gl", "Galician"),
    ("ka", "Georgian"),
    ("de", "German"),
    ("el", "Greek"),
    ("ht", "Haitian Creole"),
    ("iw", "Hebrew"),
    ("hi", "Hindi"),
    ("hu", "Hungarian"),
    ("is", "Icelandic"),
    ("id", "Indonesian"),
    ("ga", "Irish"),
    ("it", "Italian"),
    ("ja jp jpn", "Japanese"),
    ("ko", "Korean"),
    ("lv", "Latvian"),
    ("lt", "Lithuanian"),
    ("mk", "Macedonian"),
    ("ms", "Malay"),
    ("mt", "Maltese"),
    ("no", "Norwegian"),
    ("fa", "Persian"),
    ("pl", "Polish"),
    ("pt", "Portuguese"),
    ("ro", "Romanian"),
    ("ru", "Russian"),
    ("sr", "Serbian"),
    ("sk", "Slovak"),
    ("sl", "Slovenian"),
    ("es", "Spanish"),
    ("sw", "Swahili"),
    ("sv", "Swedish"),
    ("th", "Thai"),
    ("tr", "Turkish"),
    ("uk", "Ukrainian"),
    ("ur", "Urdu"),
    ("vi", "Vietnamese"),
    ("cy", "Welsh"),
    ("yi", "Yiddish")
]
80 disabled_plugins/urlhistory.py Normal file
@@ -0,0 +1,80 @@
import math
import re
import time

from util import hook, urlnorm, timesince


expiration_period = 60 * 60 * 24  # 1 day

ignored_urls = [urlnorm.normalize("http://google.com")]
def db_init(db):
    db.execute("create table if not exists urlhistory"
               "(chan, url, nick, time)")
    db.commit()


def insert_history(db, chan, url, nick):
    db.execute("insert into urlhistory(chan, url, nick, time) "
               "values(?,?,?,?)", (chan, url, nick, time.time()))
    db.commit()


def get_history(db, chan, url):
    # expire entries older than expiration_period before reading
    db.execute("delete from urlhistory where time < ?",
               (time.time() - expiration_period,))
    return db.execute("select nick, time from urlhistory where "
                      "chan=? and url=? order by time desc", (chan, url)).fetchall()
def nicklist(nicks):
    nicks = sorted(dict(nicks), key=unicode.lower)
    if len(nicks) <= 2:
        return ' and '.join(nicks)
    else:
        return ', and '.join((', '.join(nicks[:-1]), nicks[-1]))


def format_reply(history):
    if not history:
        return

    last_nick, recent_time = history[0]
    last_time = timesince.timesince(recent_time)

    if len(history) == 1:
        return  # "%s linked that %s ago." % (last_nick, last_time)

    hour_span = math.ceil((time.time() - history[-1][1]) / 3600)
    hour_span = '%.0f hours' % hour_span if hour_span > 1 else 'hour'

    hlen = len(history)
    ordinal = ["once", "twice", "%d times" % hlen][min(hlen, 3) - 1]

    if len(dict(history)) == 1:
        last = "last linked %s ago" % last_time
    else:
        last = "last linked by %s %s ago" % (last_nick, last_time)

    # reply disabled like the one above
    return  # "that url has been posted %s in the past %s by %s (%s)." % (ordinal, hour_span, nicklist(history), last)
@hook.command
def url(inp, nick='', chan='', db=None, bot=None):
    db_init(db)
    url = urlnorm.normalize(inp.group().encode('utf-8'))  # assumes inp is a regex match, despite @hook.command
    if url not in ignored_urls:
        url = url.decode('utf-8')
        history = get_history(db, chan, url)
        insert_history(db, chan, url, nick)

        inp = match.string.lower()  # bug: 'match' is never defined in this scope

        for name in dict(history):
            if name.lower() in inp:  # person was probably quoting a line
                return               # that had a link. don't remind them.

        if nick not in dict(history):
            return format_reply(history)
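
The storage side of urlhistory.py is easy to exercise on its own: same schema, same expiry delete, against an in-memory SQLite database. A standalone Python sketch (channel and nick values are made up for the demo):

import sqlite3
import time

EXPIRATION = 60 * 60 * 24  # matches expiration_period above

db = sqlite3.connect(':memory:')
db.execute("create table if not exists urlhistory(chan, url, nick, time)")

def insert_history(chan, url, nick):
    db.execute("insert into urlhistory(chan, url, nick, time) values(?,?,?,?)",
               (chan, url, nick, time.time()))

def get_history(chan, url):
    # drop expired rows first, then return (nick, time) pairs, newest first
    db.execute("delete from urlhistory where time < ?",
               (time.time() - EXPIRATION,))
    return db.execute("select nick, time from urlhistory "
                      "where chan=? and url=? order by time desc",
                      (chan, url)).fetchall()

insert_history('#test', 'http://example.com', 'alice')
insert_history('#test', 'http://example.com', 'bob')
print(get_history('#test', 'http://example.com'))
# [('bob', ...), ('alice', ...)] -- newest link first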
|
Reference in a new issue