Dumped the buggy auto parser. Replaced it with a faster command-based system that doesn't fail as much

This commit is contained in:
Luke Rogers 2012-03-21 23:52:15 +13:00
parent ebfb6bd809
commit d935cccd18


@@ -4,46 +4,37 @@ from urllib2 import urlopen, Request, HTTPError
 import re
 import BeautifulSoup
 
-ignored_urls = ["http://google.com", "http://youtube.com",
-                "http://pastebin.com", "http://mibpaste.com",
-                "http://fpaste.com", "http://git.io"]
+titler = re.compile(r'(?si)<title>(.+?)</title>');
 
 
-def parse(match):
-    url = urlnorm.normalize(match.encode('utf-8'))
-    if url not in ignored_urls:
-        url = url.decode('utf-8')
-        try:
-            soup = BeautifulSoup.BeautifulSoup(http.get(url))
-            return soup.title.string
-        except:
-            return "fail"
+def parse(url):
+    """ an improved version of our parsing code - now regex powered """
+    url = urlnorm.normalize(url.encode('utf-8'))
+    url = url.decode('utf-8')
 
-
-# there should be " after the ' in the regex string but I was unable to escape it properly
-@hook.regex(r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'.,<>?«»“”‘’]))")
-def urlparser(match, say=None, bot=None):
+    # add http if its missing
+    if url[:7] != "http://" and url[:8] != "https://":
+        url = "http://" + url
+
     try:
-        enabled = bot.config["plugins"]["urlparse"]["enabled"]
-    except KeyError:
-        enabled = False
-
-    if not enabled:
-        return
-
-    url = urlnorm.normalize(match.group().encode('utf-8'))
-    if url[:7] != "http://":
-        if url[:8] != "https://":
-            url = "http://" + url
-    for x in ignored_urls:
-        if x in url:
-            return
-    title = parse(url)
-    if title == "fail":
-        return
+        # get the title
+        print url
+        request = http.open(url)
+        real_url = request.geturl()
+        text = request.read()
+        text = text.decode('utf8')
+        match = titler.search(text)
+        title = match.group(1)
+    except:
+        return "Could not parse URL! Are you sure its valid?"
+
     title = http.unescape(title)
-    realurl = http.get_url(url)
-    if realurl == url:
-        say(u"(Link) %s" % title)
-        return
+
+    # if the url has been redirected, show us
+    if real_url == url:
+        return title
     else:
-        say(u"(Link) %s [%s]" % (title, realurl))
-        return
+        return u"%s [%s]" % (title, real_url)
+
+
+@hook.command
+def title(inp):
+    ".title <url> -- gets the title of a web page"
+    return parse(inp)
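
For context, the new parse() leans on the bot's own http and urlnorm helpers (http.open, http.unescape), which are not shown in this diff. Below is a minimal standalone sketch of the same regex-powered title lookup using only the standard library; fetch_title is an illustrative name, not part of the commit, and it skips the urlnorm normalization and HTML entity unescaping the real plugin performs.

import re
import urllib2

# same pattern the commit adds: grab whatever sits between <title> tags
titler = re.compile(r'(?si)<title>(.+?)</title>')


def fetch_title(url):
    # add http:// if it is missing, as the new parse() does
    if url[:7] != "http://" and url[:8] != "https://":
        url = "http://" + url
    try:
        request = urllib2.urlopen(url)
        real_url = request.geturl()  # final URL after any redirects
        text = request.read().decode('utf-8', 'replace')
        title = titler.search(text).group(1)
    except Exception:
        return "Could not parse URL! Are you sure its valid?"
    # if the URL was redirected, show the target as well
    if real_url == url:
        return title
    return u"%s [%s]" % (title, real_url)


print fetch_title("http://example.com")

In the plugin itself the same logic is reached through the .title command registered by @hook.command, which simply hands the user's argument to parse().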