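"""Watch chat for links and announce each page's title.

A message containing something URL-shaped triggers urlparser(), which
fetches the page, extracts its <title> and says it back, appending the
final address when the link redirects elsewhere.
"""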
from util import hook, http, urlnorm
import BeautifulSoup

# Links we never announce titles for.
ignored_urls = ["http://google.com", "http://youtube.com",
                "http://pastebin.com", "http://mibpaste.com",
                "http://fpaste.com", "http://git.io"]


def parse(match):
    url = urlnorm.normalize(match.encode('utf-8'))
    if url not in ignored_urls:
        url = url.decode('utf-8')
        try:
            # Fetch the page and return the contents of its <title> tag.
            soup = BeautifulSoup.BeautifulSoup(http.get(url))
            return soup.title.string
        except Exception:
            # Network errors, broken markup and pages without a <title>
            # all end up here; the caller treats "fail" as "stay quiet".
            return "fail"


@hook.regex(r'([a-zA-Z]+://|www\.)?[^ ]+(\.[a-z]+)+')
def urlparser(match, say=None):
    url = urlnorm.normalize(match.group().encode('utf-8'))
    # Assume plain http:// when the message left off the scheme.
    if not (url.startswith("http://") or url.startswith("https://")):
        url = "http://" + url
    # Substring check: ignoring "http://google.com" also skips any
    # longer URL on that host.
    for x in ignored_urls:
        if x in url:
            return
    title = parse(url)
    if title == "fail":
        return
    title = http.unescape(title)
    # Follow redirects so shortened links show their real destination.
    realurl = http.get_url(url)
    if realurl == url:
        say("(Link) %s" % title)
    else:
        say("(Link) %s [%s]" % (title, realurl))
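

# Minimal manual check: a sketch, assuming the bot's bundled util package
# (hook, http, urlnorm) is importable from here. Outside the bot there is
# no say() callback, so exercise parse() directly instead of urlparser().
if __name__ == '__main__':
    print parse("http://example.com")  # expect the page's <title> text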