Changed factoid commands, added URL parser

Luke Rogers 2011-11-26 04:36:44 +13:00
parent 506cdd80de
commit c50ed3aaee
2 changed files with 19 additions and 20 deletions

@@ -21,13 +21,12 @@ def get_memory(db, word):
     return None

-@hook.regex(r'^\+ ?(.*)')
+@hook.command("r")
 def remember(inp, nick='', db=None, say=None, input=None, notice=None):
-    "+<word> [+]<data> -- maps word to data in the memory"
+    ".r <word> [+]<data> -- maps word to data in the memory"
     if input.nick not in input.bot.config["admins"]:
         return
-    binp = inp.group(0)
-    bind = binp.replace('+', '', 1)
     db_init(db)
@@ -68,11 +67,9 @@ def remember(inp, nick='', db=None, say=None, input=None, notice=None):
         return

-@hook.command
+@hook.command("f")
 def forget(inp, db=None):
-    ".forget <word> -- forgets the mapping that word had"
-    if input.nick not in input.bot.config["admins"]:
-        return
+    ".f <word> -- forgets the mapping that word had"
     try:
         head, tail = bind.split(None, 1)
@@ -90,6 +87,7 @@ def forget(inp, db=None):
     else:
         return "I don't know about that."

+@hook.command("info")
 @hook.regex(r'^\? ?(.+)')
 def question(inp, say=None, db=None):
     "?<word> -- shows what data is associated with word"

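The three hunks above move the memory plugin's triggers from bare message regexes to named commands: the +<word> regex becomes the .r command, .forget becomes .f, and question gains an .info command alias alongside its ?<word> regex. The word-to-data storage underneath is unchanged. As a rough standalone illustration of that storage — a sketch only, with an assumed table name and none of CloudBot's @hook or db plumbing — the three operations reduce to:

# Standalone sketch of the remember/forget/question mapping.
# The "memory" table name and schema are assumptions, not CloudBot's.
import sqlite3

def db_init(db):
    db.execute("create table if not exists memory(word primary key, data)")

def remember(db, word, data):
    # .r <word> <data> -- maps word to data
    db_init(db)
    db.execute("replace into memory(word, data) values (?, ?)", (word, data))
    db.commit()

def forget(db, word):
    # .f <word> -- forgets the mapping; False means "I don't know about that."
    db_init(db)
    deleted = db.execute("delete from memory where word = ?", (word,)).rowcount
    db.commit()
    return deleted > 0

def question(db, word):
    # ?<word> / .info <word> -- shows what data is associated with word
    db_init(db)
    row = db.execute("select data from memory where word = ?", (word,)).fetchone()
    return row[0] if row else None

db = sqlite3.connect(":memory:")
remember(db, "bot", "a CloudBot instance")
print(question(db, "bot"))  # -> a CloudBot instance
forget(db, "bot")
print(question(db, "bot"))  # -> None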
@@ -3,29 +3,30 @@ import urllib
 import re
 import BeautifulSoup

-ignored_urls = [urlnorm.normalize("http://google.com"),urlnorm.normalize("http://youtube.com")]
+ignored_urls = ["http://google.com","http://youtube.com"]

 def parse(match):
     url = urlnorm.normalize(match.encode('utf-8'))
     if url not in ignored_urls:
         url = url.decode('utf-8')
         try:
-            soup = BeautifulSoup.BeautifulSoup(urllib.urlopen(url))
+            soup = BeautifulSoup.BeautifulSoup(http.get(url))
             return soup.title.string
         except:
-            return "Failed to parse URL"
+            return "fail"

-#@hook.regex(r'^(?#Protocol)(?:(?:ht|f)tp(?:s?)\:\/\/|~\/|\/)?(?#Username:Password)(?:\w+:\w+@)?(?#Subdomains)(?:(?:[-\w]+\.)+(?#TopLevel Domains)(?:com|org|net|gov|mil|biz|info|mobi|name|aero|jobs|museum|travel|[a-z]{2}))(?#Port)(?::[\d]{1,5})?(?#Directories)(?:(?:(?:\/(?:[-\w~!$+|.,=]|%[a-f\d]{2})+)+|\/)+|\?|#)?(?#Query)(?:(?:\?(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)(?:&(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)*)*(?#Anchor)(?:#(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)?$')
-@hook.command
-def title(inp):
-    p = re.compile(r'^(?#Protocol)(?:(?:ht|f)tp(?:s?)\:\/\/|~\/|\/)?(?#Username:Password)(?:\w+:\w+@)?(?#Subdomains)(?:(?:[-\w]+\.)+(?#TopLevel Domains)(?:com|org|net|gov|mil|biz|info|mobi|name|aero|jobs|museum|travel|[a-z]{2}))(?#Port)(?::[\d]{1,5})?(?#Directories)(?:(?:(?:\/(?:[-\w~!$+|.,=]|%[a-f\d]{2})+)+|\/)+|\?|#)?(?#Query)(?:(?:\?(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)(?:&(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)*)*(?#Anchor)(?:#(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)?$')
-    m = p.match(inp)
-    if m:
-        return parse(inp)
-    else:
-        return 'Invalid URL!'

+@hook.regex(r'([a-zA-Z]+://|www\.)[^ ]+')
+def urlparser(match, say = None):
+    url = urlnorm.normalize(match.group().encode('utf-8'))
+    for x in ignored_urls:
+        if x in url:
+            return
+    title = parse(url)
+    if title == "fail":
+        return
+    say("(Link) %s [%s]" % (title, url))
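Taken together, this file trades the on-demand title command (and its huge commented-out URL regex) for a passive hook: any message containing something matching ([a-zA-Z]+://|www\.)[^ ]+ gets normalized, fetched, and announced as (Link) <title> [<url>], with Google and YouTube links skipped and fetch failures silently dropped. A rough modern-Python equivalent — stdlib only, so no BeautifulSoup, urlnorm, or the bot's http helper, and with URL normalization left out — might look like:

# Stdlib-only sketch of the urlparser hook's behavior; not the plugin's code.
import re
import urllib.request
from html.parser import HTMLParser

URL_RE = re.compile(r'([a-zA-Z]+://|www\.)[^ ]+')  # same pattern as the hook
ignored_urls = ["http://google.com", "http://youtube.com"]

class TitleParser(HTMLParser):
    # Collects the text inside the first <title> element.
    def __init__(self):
        HTMLParser.__init__(self)
        self.in_title = False
        self.title = ""
    def handle_starttag(self, tag, attrs):
        if tag == "title":
            self.in_title = True
    def handle_endtag(self, tag):
        if tag == "title":
            self.in_title = False
    def handle_data(self, data):
        if self.in_title:
            self.title += data

def urlparser(message, say):
    match = URL_RE.search(message)
    if not match:
        return
    url = match.group()
    if any(ignored in url for ignored in ignored_urls):
        return
    try:
        # Read at most 64 KB of the page; the <title> is almost always early.
        with urllib.request.urlopen(url, timeout=10) as resp:
            parser = TitleParser()
            parser.feed(resp.read(65536).decode("utf-8", "replace"))
    except Exception:
        return  # the plugin likewise swallows failures ("fail")
    if parser.title:
        say("(Link) %s [%s]" % (parser.title.strip(), url))

urlparser("check out https://www.python.org/ sometime", say=print)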