Dropped the buggy automatic URL parser and replaced it with a faster command-based system that fails less often.
This commit is contained in:
parent
ebfb6bd809
commit
d935cccd18
|
@ -4,46 +4,37 @@ from urllib2 import urlopen, Request, HTTPError
|
||||||
import re
|
import re
|
||||||
import BeautifulSoup
|
import BeautifulSoup
|
||||||
|
|
||||||
# Sites whose links the old auto-parser never announced (pastebins and
# big well-known sites nobody needs a title for).
ignored_urls = ["http://google.com", "http://youtube.com",
                "http://pastebin.com", "http://mibpaste.com",
                "http://fpaste.com", "http://git.io"]

# Pre-compiled <title> extractor: (?s) lets '.' span newlines and (?i)
# makes the tag match case-insensitively; non-greedy so we stop at the
# first closing tag.
titler = re.compile(r'(?si)<title>(.+?)</title>')
||||||
def parse(url):
    """ an improved version of our parsing code - now regex powered

    Fetches *url* and returns its page title as extracted by the
    module-level ``titler`` regex.  If the request was redirected, the
    final URL is appended in brackets.  Any failure (network error, bad
    encoding, page without a <title> tag) is reported as a human-readable
    error string instead of raising, so the bot keeps running.

    NOTE(review): ``urlnorm`` and ``http`` are bot helper modules,
    presumably imported elsewhere in this file -- confirm.
    """
    # normalize, round-tripping through utf-8 bytes as urlnorm expects
    url = urlnorm.normalize(url.encode('utf-8'))
    url = url.decode('utf-8')

    # add http if its missing
    if url[:7] != "http://" and url[:8] != "https://":
        url = "http://" + url

    try:
        # get the title
        request = http.open(url)
        real_url = request.geturl()
        text = request.read()
        text = text.decode('utf8')
        match = titler.search(text)
        title = match.group(1)
    except Exception:
        # was a bare "except:"; narrowed so Ctrl-C / SystemExit still
        # propagate.  A page with no <title> lands here too, via the
        # AttributeError from match.group on None.
        return "Could not parse URL! Are you sure its valid?"

    title = http.unescape(title)

    # if the url has been redirected, show the real target as well
    if real_url == url:
        return title
    else:
        return u"%s [%s]" % (title, real_url)
|
|
||||||
|
@hook.command
def title(inp):
    ".title <url> -- gets the title of a web page"
    # thin wrapper: all the real work lives in the shared parse() helper
    result = parse(inp)
    return result
|
||||||
|
|
||||||
|
|
Reference in a new issue