From d935cccd1833af7f6f201211aa27cee54b4dd4b6 Mon Sep 17 00:00:00 2001
From: Luke Rogers
Date: Wed, 21 Mar 2012 23:52:15 +1300
Subject: [PATCH] Dumped the buggy auto parser. replaced it with a faster
 command-based system that doesn't fail as much

---
 plugins/urlparse.py | 69 ++++++++++++++++++++-------------------
 1 file changed, 30 insertions(+), 39 deletions(-)

diff --git a/plugins/urlparse.py b/plugins/urlparse.py
index fc6b7aa..f3cabed 100755
--- a/plugins/urlparse.py
+++ b/plugins/urlparse.py
@@ -4,46 +4,37 @@ from urllib2 import urlopen, Request, HTTPError
 import re
 import BeautifulSoup
 
-ignored_urls = ["http://google.com", "http://youtube.com",
-                "http://pastebin.com", "http://mibpaste.com",
-                "http://fpaste.com", "http://git.io"]
+titler = re.compile(r'(?si)<title>(.+?)</title>');
 
-def parse(match):
-    url = urlnorm.normalize(match.encode('utf-8'))
-    if url not in ignored_urls:
-        url = url.decode('utf-8')
-        try:
-            soup = BeautifulSoup.BeautifulSoup(http.get(url))
-            return soup.title.string
-        except:
-            return "fail"
-
-# there should be " after the ' in the regex string but I was unable to escape it properly
-@hook.regex(r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'.,<>?«»“”‘’]))")
-def urlparser(match, say=None, bot=None):
+def parse(url):
+    """ an improved version of our parsing code - now regex powered """
+    url = urlnorm.normalize(url.encode('utf-8'))
+    url = url.decode('utf-8')
+    # add http if its missing
+    if url[:7] != "http://" and url[:8] != "https://":
+        url = "http://" + url
     try:
-        enabled = bot.config["plugins"]["urlparse"]["enabled"]
-    except KeyError:
-        enabled = False
-
-    if not enabled:
-        return
-
-    url = urlnorm.normalize(match.group().encode('utf-8'))
-    if url[:7] != "http://":
-        if url[:8] != "https://":
-            url = "http://" + url
-    for x in ignored_urls:
-        if x in url:
-            return
-    title = parse(url)
-    if title == "fail":
-        return
+        # get the title
+        print url
+        request = http.open(url)
+        real_url = request.geturl()
+        text = request.read()
+        text = text.decode('utf8')
+        match = titler.search(text)
+        title = match.group(1)
+    except:
+        return "Could not parse URL! Are you sure its valid?"
+
     title = http.unescape(title)
-    realurl = http.get_url(url)
-    if realurl == url:
-        say(u"(Link) %s" % title)
-        return
+
+    # if the url has been redirected, show us
+    if real_url == url:
+        return title
     else:
-        say(u"(Link) %s [%s]" % (title, realurl))
-        return
+        return u"%s [%s]" % (title, real_url)
+
+@hook.command
+def title(inp):
+    ".title -- gets the title of a web page"
+    return parse(inp)
+
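
A minimal standalone sketch of the regex-based title extraction this patch introduces, shown outside the diff for reference. It uses only the standard re module on a hardcoded HTML string rather than the bot's http helper; the page content and variable names are illustrative only:

    import re

    # same pattern as the plugin's `titler`: (?s) lets the title span newlines,
    # (?i) matches <TITLE> as well, and the non-greedy group stops at the first </title>
    titler = re.compile(r'(?si)<title>(.+?)</title>')

    html = "<html><head><title>Example Domain</title></head><body></body></html>"
    match = titler.search(html)
    if match:
        print(match.group(1))  # -> Example Domain

With the patch applied, the same extraction runs inside parse(), which is now invoked on demand through the .title command (e.g. ".title example.com") instead of a regex hook on every message; parse() prepends "http://" when the scheme is missing and appends the final URL when the request was redirected.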